diff --git a/.golangci.yml b/.golangci.yml index 0cd68aa0ae3d8..a9bbccf91bff7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -209,6 +209,7 @@ linters: path: roxctl/common/io/io\.go # io.go will by default use os.Stdin/os.StdErr. paths: - pkg/complianceoperator/api + - scripts/compliance-operator-importer - third_party$ - builtin$ - examples$ @@ -223,6 +224,7 @@ formatters: generated: lax paths: - pkg/complianceoperator/api + - scripts/compliance-operator-importer - third_party$ - builtin$ - examples$ diff --git a/go.work b/go.work new file mode 100644 index 0000000000000..e44d3c482d5cf --- /dev/null +++ b/go.work @@ -0,0 +1,6 @@ +go 1.25.0 + +use ( + . + ./scripts/compliance-operator-importer +) diff --git a/go.work.sum b/go.work.sum new file mode 100644 index 0000000000000..ee63c5c70873c --- /dev/null +++ b/go.work.sum @@ -0,0 +1,570 @@ +4d63.com/gocheckcompilerdirectives v1.2.1/go.mod h1:yjDJSxmDTtIHHCqX0ufRYZDL6vQtMG7tJdKVeWwsqvs= +4d63.com/gochecknoglobals v0.2.1/go.mod h1:KRE8wtJB3CXCsb1xy421JfTHIIbmT3U5ruxw2Qu8fSU= +bitbucket.org/creachadair/shell v0.0.8/go.mod h1:vINzudofoUXZSJ5tREgpy+Etyjsag3ait5WOWImEVZ0= +bitbucket.org/liamstask/goose v0.0.0-20150115234039-8488cc47d90c/go.mod h1:hSVuE3qU7grINVSwrmzHfpg9k87ALBk+XaualNyUzI4= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.11-20251209175733-2a1774d88802.1/go.mod h1:tvtbpgaVXZX4g6Pn+AnzFycuRK3MOz5HJfEGeEllXYM= +buf.build/go/protovalidate v1.1.0/go.mod h1:bGZcPiAQDC3ErCHK3t74jSoJDFOs2JH3d7LWuTEIdss= +buf.build/go/protoyaml v0.6.0/go.mod h1:RgUOsBu/GYKLDSIRgQXniXbNgFlGEZnQpRAUdLAFV2Q= +chainguard.dev/go-grpc-kit v0.17.15/go.mod h1:1wAVAX2CCamtFlfMs9PFzfgQQxX1/TQyF6cbWApbJ2U= +chainguard.dev/sdk v0.1.45/go.mod h1:Xq7KQhJHsWAovd8AiWBAj/ftcNkxMPx5YoQeGVTIj2c= +cloud.google.com/go/accessapproval v1.8.8/go.mod h1:RFwPY9JDKseP4gJrX1BlAVsP5O6kI8NdGlTmaeDefmk= +cloud.google.com/go/accesscontextmanager v1.9.7/go.mod h1:i6e0nd5CPcrh7+YwGq4bKvju5YB9sgoAip+mXU73aMM= +cloud.google.com/go/ai v0.8.0/go.mod h1:t3Dfk4cM61sytiggo2UyGsDVW3RF1qGZaUKDrZFyqkE= +cloud.google.com/go/aiplatform v1.120.0/go.mod h1:6mDthfmy0oS1EQhVFdijoxkVdI2+HIZkpuGTBpedeCg= +cloud.google.com/go/analytics v0.30.1/go.mod h1:V/FnINU5kMOsttZnKPnXfKi6clJUHTEXUKQjHxcNK8A= +cloud.google.com/go/apigateway v1.7.7/go.mod h1:j1bCmrUK1BzVHpiIyTApxB7cRyhivKzltqLmp6j6i7U= +cloud.google.com/go/apigeeconnect v1.7.7/go.mod h1:ftGK3nca0JePiVLl0A6alaMjKdOc5C+sAkFMyH2RH8U= +cloud.google.com/go/apigeeregistry v0.10.0/go.mod h1:SAlF5OhKvyLDuwWAaFAIVJjrEqKRrGTPkJs+TWNnSqg= +cloud.google.com/go/appengine v1.9.7/go.mod h1:y1XpGVeAhbsNzHida79cHbr3pFRsym0ob8xnC8yphbo= +cloud.google.com/go/area120 v0.10.0/go.mod h1:Xg3fKl4xU3UVai9wsI1FXwNU8wSCDYT7dFZfwJKViAM= +cloud.google.com/go/asset v1.22.1/go.mod h1:NlvWwmca7CX6BIBEdRNxOocH6DowmBghAAHucOHuHng= +cloud.google.com/go/assuredworkloads v1.13.0/go.mod h1:o/oHEOnUlribR+uJWTKQo8A5RhSl9K9FNeMOew4TJ3M= +cloud.google.com/go/automl v1.15.0/go.mod h1:U9zOtQb8zVrFNGTuW3BfxeqmLyeleLgT9B12EaXfODg= +cloud.google.com/go/baremetalsolution v1.4.0/go.mod h1:K6C6g4aS8LW95I0fEHZiBsBlh0UxwDLGf+S/vyfXbvg= +cloud.google.com/go/batch v1.14.0/go.mod h1:oeQveyG6NDS/ks2ilOP4LzKRmuIaI7GLe0CkR7WF6pk= +cloud.google.com/go/beyondcorp v1.2.0/go.mod h1:sszcgxpPPBEfLzbI0aYCTg6tT1tyt3CmKav3NZIUcvI= +cloud.google.com/go/bigquery v1.74.0/go.mod h1:iViO7Cx3A/cRKcHNRsHB3yqGAMInFBswrE9Pxazsc90= +cloud.google.com/go/bigtable v1.42.0/go.mod h1:oZ30nofVB6/UYGg7lBwGLWSea7NZUvw/WvBBgLY07xU= +cloud.google.com/go/billing v1.21.0/go.mod h1:ZGairB3EVnb3i09E2SxFxo50p5unPaMTuo1jh6jW9js= +cloud.google.com/go/binaryauthorization v1.10.0/go.mod h1:WOuiaQkI4PU/okwrcREjSAr2AUtjQgVe+PlrXKOmKKw= +cloud.google.com/go/certificatemanager v1.9.6/go.mod h1:vWogV874jKZkSRDFCMM3r7wqybv8WXs3XhyNff6o/Zo= +cloud.google.com/go/channel v1.21.0/go.mod h1:8v3TwHtgLmFxTpL2U+e10CLFOQN8u/Vr9RhYcJUS3y8= +cloud.google.com/go/cloudbuild v1.25.0/go.mod h1:lCu+T6IPkobPo2Nw+vCE7wuaAl9HbXLzdPx/tcF+oWo= +cloud.google.com/go/clouddms v1.8.8/go.mod h1:QtCyw+a73dlkDb2q20aTAPvfaTZCepDDi6Gb1AKq0a4= +cloud.google.com/go/cloudtasks v1.13.7/go.mod h1:H0TThOUG+Ml34e2+ZtW6k6nt4i9KuH3nYAJ5mxh7OM4= +cloud.google.com/go/compute v1.54.0 h1:4CKmnpO+40z44bKG5bdcKxQ7ocNpRtOc9SCLLUzze1w= +cloud.google.com/go/compute v1.54.0/go.mod h1:RfBj0L1x/pIM84BrzNX2V21oEv16EKRPBiTcBRRH1Ww= +cloud.google.com/go/contactcenterinsights v1.17.4/go.mod h1:kZe6yOnKDfpPz2GphDHynxk/Spx+53UX/pGf+SmWAKM= +cloud.google.com/go/container v1.46.0/go.mod h1:A7gMqdQduTk46+zssWDTKbGS2z46UsJNXfKqvMI1ZO4= +cloud.google.com/go/datacatalog v1.26.1/go.mod h1:2Qcq8vsHNxMDgjgadRFmFG47Y+uuIVsyEGUrlrKEdrg= +cloud.google.com/go/dataflow v0.11.1/go.mod h1:3s6y/h5Qz7uuxTmKJKBifkYZ3zs63jS+6VGtSu8Cf7Y= +cloud.google.com/go/dataform v0.13.0/go.mod h1:U3fqrPY5jAcFh1a8rQb4a+PQ7zKlc5qfgotFZ+luKPo= +cloud.google.com/go/datafusion v1.8.7/go.mod h1:4dkFb1la41qCEXh1AzYtFwl842bu2ikTUXyKhjvFCb0= +cloud.google.com/go/datalabeling v0.9.7/go.mod h1:EEUVn+wNn3jl19P2S13FqE1s9LsKzRsPuuMRq2CMsOk= +cloud.google.com/go/dataplex v1.28.0/go.mod h1:VB+xlYJiJ5kreonXsa2cHPj0A3CfPh/mgiHG4JFhbUA= +cloud.google.com/go/dataproc/v2 v2.16.0/go.mod h1:HlzFg8k1SK+bJN3Zsy2z5g6OZS1D4DYiDUgJtF0gJnE= +cloud.google.com/go/dataqna v0.9.8/go.mod h1:2lHKmGPOqzzuqCc5NI0+Xrd5om4ulxGwPpLB4AnFgpA= +cloud.google.com/go/datastore v1.22.0/go.mod h1:aopSX+Whx0lHspWWBj+AjWt68/zjYsPfDe3LjWtqZg8= +cloud.google.com/go/datastream v1.15.1/go.mod h1:aV1Grr9LFon0YvqryE5/gF1XAhcau2uxN2OvQJPpqRw= +cloud.google.com/go/deploy v1.27.3/go.mod h1:7LFIYYTSSdljYRqY3n+JSmIFdD4lv6aMD5xg0crB5iw= +cloud.google.com/go/dialogflow v1.76.0/go.mod h1:mdLkMmSCghfcP85X9dFBlirC1OssS65KE5hrrSz2GXY= +cloud.google.com/go/dlp v1.28.0/go.mod h1:C3od1fIK8lf7Kr62aU1Uh0z4OL5Z8s3do3znAiEupAw= +cloud.google.com/go/documentai v1.42.0/go.mod h1:CABOUzRNOuvb/QwJS2LS80Hpqbu3UW2afyRKTYuW7bo= +cloud.google.com/go/domains v0.10.7/go.mod h1:T3WG/QUAO/52z4tUPooKS8AY7yXaFxPYn1V3F0/JbNQ= +cloud.google.com/go/edgecontainer v1.4.4/go.mod h1:yyNVHsCKtsX/0mqFdbljQw0Uo660q2dlMPaiqYiC2Tg= +cloud.google.com/go/errorreporting v0.4.0/go.mod h1:dZGEhqzdHZSRxxWLVjC3Ue5CVaROzvP58D9rU6zbBfw= +cloud.google.com/go/essentialcontacts v1.7.7/go.mod h1:ytycWAEn/aKUMRKQPMVgMrAtphEMgjbzL8vFwM3tqXs= +cloud.google.com/go/eventarc v1.18.0/go.mod h1:/6SDoqh5+9QNUqCX4/oQcJVK16fG/snHBSXu7lrJtO8= +cloud.google.com/go/filestore v1.10.3/go.mod h1:94ZGyLTx9j+aWKozPQ6Wbq1DuImie/L/HIdGMshtwac= +cloud.google.com/go/firestore v1.21.0/go.mod h1:1xH6HNcnkf/gGyR8udd6pFO4Z7GWJSwLKQMx/u6UrP4= +cloud.google.com/go/functions v1.19.7/go.mod h1:xbcKfS7GoIcaXr2FSwmtn9NXal1JR4TV6iYZlgXffwA= +cloud.google.com/go/gkebackup v1.8.1/go.mod h1:GAaAl+O5D9uISH5MnClUop2esQW4pDa2qe/95A4l7YQ= +cloud.google.com/go/gkeconnect v0.12.5/go.mod h1:wMD2RXcsAWlkREZWJDVeDV70PYka1iEb9stFmgpw+5o= +cloud.google.com/go/gkehub v0.16.0/go.mod h1:ADp27Ucor8v81wY+x/5pOxTorxkPj/xswH3AUpN62GU= +cloud.google.com/go/gkemulticloud v1.6.0/go.mod h1:bGpd4o/Z5Z/XFlaojkgdVisHRwb+fLJvUPzsmV0I9ok= +cloud.google.com/go/grafeas v0.3.16/go.mod h1:I/yrRMOEsLasrmZXQzmDXwrJ3ZPn3dQWLaWt4lXmYvE= +cloud.google.com/go/gsuiteaddons v1.7.8/go.mod h1:DBKNHH4YXAdd/rd6zVvtOGAJNGo0ekOh+nIjTUDEJ5U= +cloud.google.com/go/iap v1.11.3/go.mod h1:+gXO0ClH62k2LVlfhHzrpiHQNyINlEVmGAE3+DB4ShU= +cloud.google.com/go/ids v1.5.7/go.mod h1:N3ZQOIgIBwwOu2tzyhmh3JDT+kt8PcoKkn2BRT9Qe4A= +cloud.google.com/go/iot v1.8.7/go.mod h1:HvVcypV8LPv1yTXSLCNK+YCtqGHhq+p0F3BXETfpN+U= +cloud.google.com/go/language v1.14.6/go.mod h1:7y3J9OexQsfkWNGCxhT+7lb64pa60e12ZCoWDOHxJ1M= +cloud.google.com/go/lifesciences v0.10.7/go.mod h1:v3AbTki9iWttEls/Wf4ag3EqeLRHofploOcpsLnu7iY= +cloud.google.com/go/managedidentities v1.7.7/go.mod h1:nwNlMxtBo2YJMvsKXRtAD1bL41qiCI9npS7cbqrsJUs= +cloud.google.com/go/maps v1.29.0/go.mod h1:FNATcM5ziB2TDE2IVWH4f/yeXc+SbUk1X+bmKjR8HEA= +cloud.google.com/go/mediatranslation v0.9.7/go.mod h1:mz3v6PR7+Fd/1bYrRxNFGnd+p4wqdc/fyutqC5QHctw= +cloud.google.com/go/memcache v1.11.7/go.mod h1:AU1jYlUqCihxapcJ1GGMtlMWDVhzjbfUWBXqsXa4rBg= +cloud.google.com/go/metastore v1.14.8/go.mod h1:h1XI2LpD4ohJhQYn9TwXqKb5sVt6KSo47ft96SiFF1s= +cloud.google.com/go/networkconnectivity v1.21.0/go.mod h1:XC1UJ+tqBsLWz73dqrMc7kUvdTv0FIxtDGv6YntTBO0= +cloud.google.com/go/networkmanagement v1.23.0/go.mod h1:QTYCWp5UxUnU280SqF7AX/mf6NhsqKblmLeCALQmx5c= +cloud.google.com/go/networksecurity v0.11.0/go.mod h1:JLgDsg4tOyJ3eMO8lypjqMftbfd60SJ+P7T+DUmWBsM= +cloud.google.com/go/notebooks v1.12.7/go.mod h1:uR9pxAkKmlNloibMr9Q1t8WhIu4P2JeqJs7c064/0Mo= +cloud.google.com/go/optimization v1.7.7/go.mod h1:OY2IAlX23o52qwMAZ0w65wibKuV12a4x6IHDTCq6kcU= +cloud.google.com/go/orchestration v1.11.10/go.mod h1:tz7m1s4wNEvhNNIM3JOMH0lYxBssu9+7si5MCPw/4/0= +cloud.google.com/go/orgpolicy v1.15.1/go.mod h1:bpvi9YIyU7wCW9WiXL/ZKT7pd2Ovegyr2xENIeRX5q0= +cloud.google.com/go/osconfig v1.16.0/go.mod h1:PRmLgZ1loD1hGaqnTBww1nETbqcqAvmTQOLYiIZ7Nvk= +cloud.google.com/go/oslogin v1.14.7/go.mod h1:NB6NqBHfDMwznePdBVX+ILllc1oPCdNSGp5u/WIyndY= +cloud.google.com/go/phishingprotection v0.9.7/go.mod h1:JTI4HNGyAbWolBoNOoCyCF0e3cqPNrYnlievHU49EwE= +cloud.google.com/go/policytroubleshooter v1.11.7/go.mod h1:JP/aQ+bUkt4Gz6lQXBi/+A/6nyNRZ0Pvxui5Xl9ieyk= +cloud.google.com/go/privatecatalog v0.10.8/go.mod h1:BkLHi+rtAGYBt5DocXLytHhF0n6F03Tegxgty40Y7aA= +cloud.google.com/go/profiler v0.4.3/go.mod h1:3xFodugWfPIQZWFcXdUmfa+yTiiyQ8fWrdT+d2Sg4J0= +cloud.google.com/go/pubsub v1.50.1/go.mod h1:6YVJv3MzWJUVdvQXG081sFvS0dWQOdnV+oTo++q/xFk= +cloud.google.com/go/pubsub/v2 v2.4.0/go.mod h1:2lS/XQKq5qtOMs6kHBK+WX1ytUC36kLl2ig3zqsGUx8= +cloud.google.com/go/pubsublite v1.8.2/go.mod h1:4r8GSa9NznExjuLPEJlF1VjOPOpgf3IT6k8x/YgaOPI= +cloud.google.com/go/recaptchaenterprise/v2 v2.21.0/go.mod h1:HxQYqZC2/zl2CvKN7jJEv71vEdDi1GMGNUiZxnpiuVI= +cloud.google.com/go/recommendationengine v0.9.7/go.mod h1:snZ/FL147u86Jqpv1j95R+CyU5NvL/UzYiyDo6UByTM= +cloud.google.com/go/recommender v1.13.6/go.mod h1:y5/5womtdOaIM3xx+76vbsiA+8EBTIVfWnxHDFHBGJM= +cloud.google.com/go/redis v1.18.3/go.mod h1:x8HtXZbvMBDNT6hMHaQ022Pos5d7SP7YsUH8fCJ2Wm4= +cloud.google.com/go/resourcemanager v1.10.7/go.mod h1:rScGkr6j2eFwxAjctvOP/8sqnEpDbQ9r5CKwKfomqjs= +cloud.google.com/go/resourcesettings v1.8.3/go.mod h1:BzgfXFHIWOOmHe6ZV9+r3OWfpHJgnqXy8jqwx4zTMLw= +cloud.google.com/go/retail v1.26.0/go.mod h1:gMfh6s174Mvy1rK4g50J9TH5sRim8px+Krml25kdrqo= +cloud.google.com/go/run v1.15.0/go.mod h1:rgFHMdAopLl++57vzeqA+a1o2x0/ILZnEacRD6nC0EA= +cloud.google.com/go/scheduler v1.11.8/go.mod h1:bNKU7/f04eoM6iKQpwVLvFNBgGyJNS87RiFN73mIPik= +cloud.google.com/go/secretmanager v1.16.0/go.mod h1://C/e4I8D26SDTz1f3TQcddhcmiC3rMEl0S1Cakvs3Q= +cloud.google.com/go/security v1.19.2/go.mod h1:KXmf64mnOsLVKe8mk/bZpU1Rsvxqc0Ej0A6tgCeN93w= +cloud.google.com/go/servicedirectory v1.12.7/go.mod h1:gOtN+qbuCMH6tj2dqlDY3qQL7w3V0+nkWaZElnJK8Ps= +cloud.google.com/go/shell v1.8.7/go.mod h1:OTke7qc3laNEW5Jr5OV9VR3IwU5x5VqGOE6705zFex4= +cloud.google.com/go/spanner v1.88.0/go.mod h1:MzulBwuuYwQUVdkZXBBFapmXee3N+sQrj2T/yup6uEE= +cloud.google.com/go/speech v1.30.0/go.mod h1:F2+NJujR8uzDLd6bwy5kgtVycxvEq06nzvzz5eQ/gMo= +cloud.google.com/go/storagetransfer v1.13.1/go.mod h1:S858w5l383ffkdqAqrAA+BC7KlhCqeNieK3sFf5Bj4Y= +cloud.google.com/go/talent v1.8.4/go.mod h1:3yukBXUTVFNyKcJpUExW/k5gqEy8qW6OCNj7WdN0MWo= +cloud.google.com/go/texttospeech v1.16.0/go.mod h1:AeSkoH3ziPvapsuyI07TWY4oGxluAjntX+pF4PJ2jy0= +cloud.google.com/go/tpu v1.8.4/go.mod h1:ul0cyWSHr6jHGZYElZe6HvQn35VY93RAlwpDiSBRnPA= +cloud.google.com/go/translate v1.12.7/go.mod h1:wwJp14NZyWvcrFANhIXutXj0pOBkYciBHwSlUOykcjI= +cloud.google.com/go/video v1.27.1/go.mod h1:xzfAC77B4vtnbi/TT3UUxEjCa/+Ehy5EA8w470ytOig= +cloud.google.com/go/videointelligence v1.12.7/go.mod h1:XAk5hCMY+GihxJ55jNoMdwdXSNZnCl3wGs2+94gK7MA= +cloud.google.com/go/vision/v2 v2.9.6/go.mod h1:lJC+vP15D5znJvHQYjEoTKnpToX1L93BUlvBmzM0gyg= +cloud.google.com/go/vmmigration v1.10.0/go.mod h1:LDztCWEb+RwS1bPg4Xzt0fcJS9kVrFxa3ejhH7OW9vg= +cloud.google.com/go/vmwareengine v1.3.6/go.mod h1:ps0rb+Skgpt9ppHYC0o5DqtJ5ld2FyS8sAqtbHH8t9s= +cloud.google.com/go/vpcaccess v1.8.7/go.mod h1:9RYw5bVvk4Z51Rc8vwXT63yjEiMD/l7XyEaDyrNHgmk= +cloud.google.com/go/webrisk v1.11.2/go.mod h1:yH44GeXz5iz4HFsIlGeoVvnjwnmfbni7Lwj1SelV4f0= +cloud.google.com/go/websecurityscanner v1.7.7/go.mod h1:ng/PzARaus3Bj4Os4LpUnyYHsbtJky1HbBDmz148v1o= +cloud.google.com/go/workflows v1.14.3/go.mod h1:CC9+YdVI2Kvp0L58WajHpEfKJxhrtRh3uQ0SYWcmAk4= +contrib.go.opencensus.io/exporter/stackdriver v0.13.15-0.20230702191903-2de6d2748484/go.mod h1:uxw+4/0SiKbbVSD/F2tk5pJTdVcfIBBcsQ8gwcu4X+E= +cuelabs.dev/go/oci/ociregistry v0.0.0-20250722084951-074d06050084/go.mod h1:4WWeZNxUO1vRoZWAHIG0KZOd6dA25ypyWuwD3ti0Tdc= +cuelang.org/go v0.15.3/go.mod h1:NYw6n4akZcTjA7QQwJ1/gqWrrhsN4aZwhcAL0jv9rZE= +cyphar.com/go-pathrs v0.2.1/go.mod h1:y8f1EMG7r+hCuFf/rXsKqMJrJAUoADZGNh5/vZPKcGc= +github.com/4meepo/tagalign v1.3.4/go.mod h1:M+pnkHH2vG8+qhE5bVc/zeP7HS/j910Fwa9TUSyZVI0= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= +github.com/Abirdcfly/dupword v0.0.14/go.mod h1:VKDAbxdY8YbKUByLGg8EETzYSuC4crm9WwI6Y3S0cLI= +github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20230306123547-8075edf89bb0/go.mod h1:OahwfttHWG6eJ0clwcfBAHoDI6X/LV/15hx/wlMZSrU= +github.com/Antonboom/errname v0.1.13/go.mod h1:uWyefRYRN54lBg6HseYCFhs6Qjcy41Y3Jl/dVhA87Ns= +github.com/Antonboom/nilnil v0.1.9/go.mod h1:iGe2rYwCq5/Me1khrysB4nwI7swQvjclR8/YRPl5ihQ= +github.com/Antonboom/testifylint v1.3.1/go.mod h1:NV0hTlteCkViPW9mSR4wEMfwp+Hs1T3dY60bkvSfhpM= +github.com/Crocmagnon/fatcontext v0.2.2/go.mod h1:WSn/c/+MMNiD8Pri0ahRj0o9jVpeowzavOQplBJw6u0= +github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/GaijinEntertainment/go-exhaustruct/v3 v3.2.0/go.mod h1:Nl76DrGNJTA1KJ0LePKBw/vznBX1EHbAZX8mwjR82nI= +github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.3/go.mod h1:dppbR7CwXD4pgtV9t3wD1812RaLDcBjtblcDF5f1vI0= +github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/MakeNowJust/heredoc/v2 v2.0.1/go.mod h1:6/2Abh5s+hc3g9nbWLe9ObDIOhaRrqsyY9MWy+4JdRM= +github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/vcs v1.13.3/go.mod h1:TiE7xuEjl1N4j016moRd6vezp6e6Lz23gypeXfzXeW8= +github.com/Microsoft/hcsshim v0.13.0/go.mod h1:9KWJ/8DgU+QzYGupX4tzMhRQE8h6w90lH6HAaclpEok= +github.com/OpenPeeDeeP/depguard/v2 v2.2.0/go.mod h1:CIzddKRvLBC4Au5aYP/i3nyaWQ+ClszLIuVocRiCYFQ= +github.com/PaesslerAG/gval v1.0.0/go.mod h1:y/nm5yEyTeX6av0OfKJNp9rBNj2XrGhAf5+v24IBN1I= +github.com/PaesslerAG/jsonpath v0.1.1/go.mod h1:lVboNxFGal/VwW6d9JzIy56bUsYAP6tH/x80vjnCseY= +github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk= +github.com/aclements/go-moremath v0.0.0-20210112150236-f10218a38794/go.mod h1:7e+I0LQFUI9AXWxOfsQROs9xPhoJtbsyWcjJqDd4KPY= +github.com/agnivade/levenshtein v1.2.1/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= +github.com/ahmetb/gen-crd-api-reference-docs v0.3.0/go.mod h1:TdjdkYhlOifCQWPs1UdTma97kQQMozf5h26hTuG70u8= +github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= +github.com/alecthomas/go-check-sumtype v0.1.4/go.mod h1:WyYPfhfkdhyrdaligV6svFopZV8Lqdzn5pyVBaV6jhQ= +github.com/alecthomas/kingpin/v2 v2.4.0/go.mod h1:0gyi0zQnjuFk8xrkNKamJoyUo382HRL7ATRpFZCw6tE= +github.com/alecthomas/units v0.0.0-20240927000941-0f3dac36c52b/go.mod h1:fvzegU4vN3H1qMT+8wDmzjAcDONcgo2/SZ/TyfdUOFs= +github.com/alessio/shellescape v1.4.1/go.mod h1:PZAiSCk0LJaZkiCSkPv8qIobYglO3FPpyFjDCtHLS30= +github.com/alexkohler/nakedret/v2 v2.0.4/go.mod h1:bF5i0zF2Wo2o4X4USt9ntUWve6JbFv02Ff4vlkmS/VU= +github.com/alexkohler/prealloc v1.0.0/go.mod h1:VetnK3dIgFBBKmg0YnD9F9x6Icjd+9cvfHR56wJVlKE= +github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I= +github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU= +github.com/antchfx/xmlquery v1.4.3/go.mod h1:AEPEEPYE9GnA2mj5Ur2L5Q5/2PycJ0N9Fusrx9b12fc= +github.com/antchfx/xpath v1.3.3/go.mod h1:i54GszH55fYfBmoZXapTHN8T8tkcHfRgLyVwwqzXNcs= +github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU= +github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI= +github.com/avast/retry-go/v4 v4.7.0/go.mod h1:ZMPDa3sY2bKgpLtap9JRUgk2yTAba7cgiFhqxY2Sg6Q= +github.com/bboreham/go-loser v0.0.0-20230920113527-fcc2c21820a3/go.mod h1:CIWtjkly68+yqLPbvwwR/fjNJA/idrtULjZWh2v1ys0= +github.com/beevik/ntp v1.5.0/go.mod h1:mJEhBrwT76w9D+IfOEGvuzyuudiW9E52U2BaTrMOYow= +github.com/bgentry/speakeasy v0.2.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bkielbasa/cyclop v1.2.1/go.mod h1:K/dT/M0FPAiYjBgQGau7tz+3TMh4FWAEqlMhzFWCrgM= +github.com/blizzy78/varnamelen v0.8.0/go.mod h1:V9TzQZ4fLJ1DSrjVDfl89H7aMnTvKkApdHeyESmyR7k= +github.com/bombsimon/wsl/v4 v4.2.1/go.mod h1:Xu/kDxGZTofQcDGCtQe9KCzhHphIe0fDuyWTxER9Feo= +github.com/breml/bidichk v0.2.7/go.mod h1:YodjipAGI9fGcYM7II6wFvGhdMYsC5pHDlGzqvEW3tQ= +github.com/breml/errchkjson v0.3.6/go.mod h1:jhSDoFheAF2RSDOlCfhHO9KqhZgAYLyvHe7bRCX8f/U= +github.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c= +github.com/bugsnag/bugsnag-go v1.5.3/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/panicwrap v1.2.0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/buildkite/agent/v3 v3.115.2/go.mod h1:a3t090/PPxAIIPCjlXF5fhfRvG0E9huFsnMX7B76iIQ= +github.com/buildkite/go-pipeline v0.16.0/go.mod h1:VE37qY3X5pmAKKUMoDZvPsHOQuyakB9cmXj9Qn6QasA= +github.com/buildkite/interpolate v0.1.5/go.mod h1:dHnrwHew5O8VNOAgMDpwRlFnhL5VSN6M1bHVmRZ9Ccc= +github.com/buildkite/roko v1.4.0/go.mod h1:0vbODqUFEcVf4v2xVXRfZZRsqJVsCCHTG/TBRByGK4E= +github.com/butuzov/ireturn v0.3.0/go.mod h1:A09nIiwiqzN/IoVo9ogpa0Hzi9fex1kd9PSD6edP5ZA= +github.com/butuzov/mirror v1.2.0/go.mod h1:DqZZDtzm42wIAIyHXeN8W/qb1EPlb9Qn/if9icBOpdQ= +github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/catenacyber/perfsprint v0.7.1/go.mod h1:/wclWYompEyjUD2FuIIDVKNkqz7IgBIWXIH3V0Zol50= +github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e/go.mod h1:V284PjgVwSk4ETmz84rpu9ehpGg7swlIH8npP9k2bGw= +github.com/cavaliercoder/go-rpm v0.0.0-20200122174316-8cb9fd9c31a8/go.mod h1:AZIh1CCnMrcVm6afFf96PBvE2MRpWFco91z8ObJtgDY= +github.com/cavaliergopher/cpio v1.0.1/go.mod h1:pBdaqQjnvXxdS/6CvNDwIANIFSP0xRKI16PX4xejRQc= +github.com/ccojocar/zxcvbn-go v1.0.2/go.mod h1:g1qkXtUSvHP8lhHp5GrSmTz6uWALGRMQdw6Qnz/hi60= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/chainguard-dev/clog v1.7.0/go.mod h1:4+WFhRMsGH79etYXY3plYdp+tCz/KCkU8fAr0HoaPvs= +github.com/charithe/durationcheck v0.0.10/go.mod h1:bCWXb7gYRysD1CU3C+u4ceO49LoGOY1C1L6uouGNreQ= +github.com/chavacava/garif v0.1.0/go.mod h1:XMyYCkEL58DF0oyW4qDjjnPWONs2HBqYKI+UIPD+Gww= +github.com/cheggaaa/pb/v3 v3.1.6/go.mod h1:urxmfVtaxT+9aWk92DbsvXFZtNSWQSO5TRAp+MJ3l1s= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/cilium/ebpf v0.9.1/go.mod h1:+OhNOIXx/Fnu1IE8bJz2dzOA+VSfyTfdNUVdlQnxUFY= +github.com/ckaznocha/intrange v0.1.2/go.mod h1:RWffCw/vKBwHeOEwWdCikAtY0q4gGt8VhJZEEA5n+RE= +github.com/ckaznocha/protoc-gen-lint v0.3.0/go.mod h1:ASGO5J8wYQ8yJPBE68EntfsSKRU8tp7qAskT3BjIsvE= +github.com/clarketm/json v1.17.1/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo= +github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= +github.com/cloudflare/backoff v0.0.0-20161212185259-647f3cdfc87a/go.mod h1:rzgs2ZOiguV6/NpiDgADjRLPNyZlApIWxKpkT+X8SdY= +github.com/cloudflare/redoctober v0.0.0-20211013234631-6a74ccc611f6/go.mod h1:Ikt4Wfpln1YOrak+auA8BNxgiilj0Y2y7nO+aN2eMzk= +github.com/cockroachdb/apd/v3 v3.2.1/go.mod h1:klXJcjp+FffLTHlhIG69tezTDvdP065naDsHzKhYSqc= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs/v2 v2.0.0/go.mod h1:swkD/7j9HApWpzl8OHfrHNxppPd9l44DFZdF94BUj9k= +github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= +github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins= +github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= +github.com/containerd/containerd/api v1.8.0/go.mod h1:dFv4lt6S20wTu/hMcP4350RL87qPWLVa/OHOwmmdnYc= +github.com/containerd/continuity v0.4.4/go.mod h1:/lNJvtJKUQStBzpVQ1+rasXO1LAWtUQssk28EZvJ3nE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o= +github.com/containerd/go-cni v1.1.9/go.mod h1:XYrZJ1d5W6E2VOvjffL3IZq0Dz6bsVlERHbekNK90PM= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.1.8/go.mod h1:x6QvFIkMyO2qGIY2zXc88ivEzcbgvLdWjoZyGqDap5U= +github.com/containerd/nri v0.8.0/go.mod h1:uSkgBrCdEtAiEz4vnrq8gmAC4EnVAM5Klt0OuK5rZYQ= +github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= +github.com/containerd/zfs v1.1.0/go.mod h1:oZF9wBnrnQjpWLaPKEinrx3TQ9a+W/RJO7Zb41d8YLE= +github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw= +github.com/containernetworking/plugins v1.2.0/go.mod h1:/VjX4uHecW5vVimFa1wkG4s+r/s9qIfPdqlLF4TW8c4= +github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY= +github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ= +github.com/coreos/fcct v0.5.0/go.mod h1:cbE+j77YSQwFB2fozWVB3qsI2Pi3YiVEbDz/b6Yywdo= +github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb/go.mod h1:rcFZM3uxVvdyNmsAV2jopgPD1cs5SPWJWU5dOz2LUnw= +github.com/coreos/go-oidc v2.3.0+incompatible h1:+5vEsrgprdLjjQ9FzIKAzQz1wwPD+83hQRfUIPh7rO0= +github.com/coreos/go-oidc v2.3.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= +github.com/coreos/ign-converter v0.0.0-20241125185625-2f773079ca81/go.mod h1:Q7SbzjFkayIfwm+b+nXedvIcP2SFAndA7ET/JPNNc1I= +github.com/coreos/ignition v0.35.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA= +github.com/coreos/ignition/v2 v2.21.0/go.mod h1:axhFZ3jEgXBjKtKp0rSMv2li0Rt43rasp5hS9uyYjco= +github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687/go.mod h1:Salmysdw7DAVuobBW/LwsKKgpyCPHUhjyJoMJD+ZJiI= +github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc= +github.com/daixiang0/gci v0.13.4/go.mod h1:12etP2OniiIdP4q+kjUGrC/rUagga7ODbqsom5Eo5Yk= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.4.0/go.mod h1:ZXNYxsqcloTdSy/rNShjYzMhyjf0LaoftYK0p+A3h40= +github.com/denis-tingaikin/go-header v0.5.0/go.mod h1:mMenU5bWrok6Wl2UsZjy+1okegmwQ3UgWl4V1D8gjlY= +github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/edsrzf/mmap-go v1.2.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= +github.com/eggsampler/acme/v3 v3.6.2/go.mod h1:/qh0rKC/Dh7Jj+p4So7DbWmFNzC4dpcpK53r226Fhuo= +github.com/emicklei/go-restful v2.15.0+incompatible h1:8KpYO/Xl/ZudZs5RNOEhWMBY4hmzlZhhRd9cu+jrZP4= +github.com/emicklei/proto v1.14.2/go.mod h1:rn1FgRS/FANiZdD2djyH7TMA9jdRDcYQ9IEN9yvjX0A= +github.com/ettle/strcase v0.2.0/go.mod h1:DajmHElDSaX76ITe3/VHVyMin4LWSJN5Z909Wp+ED1A= +github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= +github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/firefart/nonamedreturns v1.0.5/go.mod h1:gHJjDqhGM4WyPt639SOZs+G89Ko7QKH5R5BhnO6xJhw= +github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI= +github.com/fullstorydev/grpcurl v1.9.3/go.mod h1:/b4Wxe8bG6ndAjlfSUjwseQReUDUvBJiFEB7UllOlUE= +github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= +github.com/gabriel-vasile/mimetype v1.4.11/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s= +github.com/ghostiam/protogetter v0.3.6/go.mod h1:7lpeDnEJ1ZjL/YtyoN99ljO4z0pd3H0d18/t2dPBxHw= +github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= +github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= +github.com/go-critic/go-critic v0.11.4/go.mod h1:2QAdo4iuLik5S9YG0rT4wcZ8QxwHYkrr6/2MWAiv/vc= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v3 v3.0.4/go.mod h1:5b+7YgP7ZICgJDBdfjZaIt+H/9L9T/YQrVfLAMboGkQ= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-piv/piv-go/v2 v2.4.0/go.mod h1:ShZi74nnrWNQEdWzRUd/3cSig3uNOcEZp+EWl0oewnI= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.29.0/go.mod h1:D6QxqeMlgIPuT02L66f2ccrZ7AGgHkzKmmTMZhk/Kc4= +github.com/go-redis/redismock/v9 v9.2.0/go.mod h1:18KHfGDK4Y6c2R0H38EUGWAdc7ZQS9gfYxc94k7rWT0= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-toolsmith/astcast v1.1.0/go.mod h1:qdcuFWeGGS2xX5bLM/c3U9lewg7+Zu4mr+xPwZIB4ZU= +github.com/go-toolsmith/astcopy v1.1.0/go.mod h1:hXM6gan18VA1T/daUEHCFcYiW8Ai1tIwIzHY6srfEAw= +github.com/go-toolsmith/astequal v1.2.0/go.mod h1:c8NZ3+kSFtFY/8lPso4v8LuJjdJiUFVnSuU3s0qrrDY= +github.com/go-toolsmith/astfmt v1.1.0/go.mod h1:OrcLlRwu0CuiIBp/8b5PYF9ktGVZUjlNMV634mhwuQ4= +github.com/go-toolsmith/astp v1.1.0/go.mod h1:0T1xFGz9hicKs8Z5MfAqSUitoUYS30pDMsRVIDHs8CA= +github.com/go-toolsmith/strparse v1.1.0/go.mod h1:7ksGy58fsaQkGQlY8WVoBFNyEPMGuJin1rfoPS4lBSQ= +github.com/go-toolsmith/typep v1.1.0/go.mod h1:fVIw+7zjdsMxDA3ITWnH1yOiw1rnTQKCsF/sk2H/qig= +github.com/go-xmlfmt/xmlfmt v1.1.2/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godror/godror v0.40.4/go.mod h1:i8YtVTHUJKfFT3wTat4A9UoqScUtZXiYB9Rf3SVARgc= +github.com/godror/knownpb v0.1.1/go.mod h1:4nRFbQo1dDuwKnblRXDxrfCFYeT4hjg3GjMqef58eRE= +github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= +github.com/golang/mock v1.7.0-rc.1/go.mod h1:s42URUywIqd+OcERslBJvOjepvNymP31m3q8d/GkuRs= +github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a/go.mod h1:ryS0uhF+x9jgbj/N71xsEqODy9BN81/GonCZiOzirOk= +github.com/golangci/gofmt v0.0.0-20231018234816-f50ced29576e/go.mod h1:Pm5KhLPA8gSnQwrQ6ukebRcapGb/BG9iUkdaiCcGHJM= +github.com/golangci/golangci-lint v1.59.1/go.mod h1:jX5Oif4C7P0j9++YB2MMJmoNrb01NJ8ITqKWNLewThg= +github.com/golangci/misspell v0.6.0/go.mod h1:keMNyY6R9isGaSAu+4Q8NMBwMPkh15Gtc8UCVoDtAWo= +github.com/golangci/modinfo v0.3.4/go.mod h1:wytF1M5xl9u0ij8YSvhkEVPP3M5Mc7XLl1pxH3B2aUM= +github.com/golangci/plugin-module-register v0.1.2/go.mod h1:1+QGTsKBvAIvPvoY/os+G5eoqxWn70HYDm2uvUyGuVw= +github.com/golangci/revgrep v0.5.3/go.mod h1:U4R/s9dlXZsg8uJmaR1GrloUr14D7qDl8gi2iPXJH8k= +github.com/golangci/unconvert v0.0.0-20240309020433-c5143eacb3ed/go.mod h1:XLXN8bNw4CGRPaqgl3bv/lhz7bsGPh4/xSaMTbo2vkQ= +github.com/google/addlicense v1.1.1/go.mod h1:Sm/DHu7Jk+T5miFHHehdIjbi4M5+dJDRS3Cq0rncIxA= +github.com/google/generative-ai-go v0.19.0/go.mod h1:JYolL13VG7j79kM5BtHz4qwONHkeJQzOCkKXnpqtS/E= +github.com/google/go-intervals v0.0.2/go.mod h1:MkaR3LNRfeKLPmqgJYs4E66z5InYjmCjbbr4TQlcT6Y= +github.com/google/go-pkcs11 v0.3.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= +github.com/google/rpmpack v0.7.1/go.mod h1:h1JL16sUTWCLI/c39ox1rDaTBo3BXUQGjczVJyK4toU= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/wire v0.7.0/go.mod h1:n6YbUQD9cPKTnHXEBN2DXlOp/mVADhVErcMFb0v3J18= +github.com/gookit/color v1.6.0/go.mod h1:9ACFc7/1IpHGBW8RwuDm/0YEnhg3dwwXpoMsmtyHfjs= +github.com/gordonklaus/ineffassign v0.1.0/go.mod h1:Qcp2HIAYhR7mNUVSIxZww3Guk4it82ghYcEXIAk+QT0= +github.com/gostaticanalysis/analysisutil v0.7.1/go.mod h1:v21E3hY37WKMGSnbsw2S/ojApNWb6C1//mXO48CXbVc= +github.com/gostaticanalysis/comment v1.4.2/go.mod h1:KLUTGDv6HOCotCH8h2erHKmpci2ZoR8VPu34YA2uzdM= +github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3VhWZ2mqag3IkWsDHVbplHXak= +github.com/gostaticanalysis/nilerr v0.1.1/go.mod h1:wZYb6YI5YAxxq0i1+VJbY0s2YONW0HU0GPE3+5PWN4A= +github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.1.0/go.mod h1:hM2alZsMUni80N33RBe6J0e423LB+odMj7d3EMP9l20= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/guptarohit/asciigraph v0.5.5/go.mod h1:dYl5wwK4gNsnFf9Zp+l06rFiDZ5YtXM6x7SRWZ3KGag= +github.com/guregu/null v4.0.0+incompatible/go.mod h1:ePGpQaN9cw0tj45IR5E5ehMvsFlLlQZAkkOXZurJ3NM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/hydrogen18/memlistener v1.0.0/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20250417193237-f615e6bd150b/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/intel/goresctrl v0.5.0/go.mod h1:mIe63ggylWYr0cU/l8n11FAkesqfvuP3oktIsxvu0T0= +github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= +github.com/jackc/puddle v1.3.0 h1:eHK/5clGOatcjX3oWGBO/MpxpbHzSwud5EWTSCI+MX0= +github.com/jaytaylor/html2text v0.0.0-20230321000545-74c2419ad056/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk= +github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc= +github.com/jgautheron/goconst v1.7.1/go.mod h1:aAosetZ5zaeC/2EfMeRswtxUFBpe2Hr7HzkgX4fanO4= +github.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8= +github.com/jingyugao/rowserrcheck v1.1.1/go.mod h1:4yvlZSDb3IyDTUZJUmpZfm2Hwok+Dtp+nu2qOq+er9c= +github.com/jirfag/go-printf-func-name v0.0.0-20200119135958-7558a9eaa5af/go.mod h1:HEWGJkRDzjJY2sqdDwxccsGicWEf9BQOZsq2tV+xzM0= +github.com/jjti/go-spancheck v0.6.1/go.mod h1:vF1QkOO159prdo6mHRxak2CpzDpHAfKiPUDP/NeRnX8= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/julz/importas v0.1.0/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0= +github.com/karamaru-alpha/copyloopvar v1.1.0/go.mod h1:u7CIfztblY0jZLOQZgH3oYsJzpC2A7S6u/lfgSXHy0k= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= +github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= +github.com/kkHAIKE/contextcheck v1.1.5/go.mod h1:O930cpht4xb1YQpK+1+AgoM3mFsvxr7uyFptcnWTYUA= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= +github.com/kunwardeep/paralleltest v1.0.10/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY= +github.com/kylelemons/go-gypsy v1.0.0/go.mod h1:chkXM0zjdpXOiqkCW1XcCHDfjfk14PH2KKkQWxfJUcU= +github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA= +github.com/lasiar/canonicalheader v1.1.1/go.mod h1:cXkb3Dlk6XXy+8MVQnF23CYKWlyA7kfQhSw2CcZtZb0= +github.com/ldez/gomoddirectives v0.2.4/go.mod h1:oWu9i62VcQDYp9EQ0ONTfqLNh+mDLWWDO+SO0qSQw5g= +github.com/ldez/tagliatelle v0.5.0/go.mod h1:rj1HmWiL1MiKQuOONhd09iySTEkUuE/8+5jtPYz9xa4= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/leonklingele/grouper v1.1.2/go.mod h1:6D0M/HVkhs2yRKRFZUoGjeDy7EZTfFBE9gl4kjmIGkA= +github.com/lestrrat-go/blackmagic v1.0.4/go.mod h1:6AWFyKNNj0zEXQYfTMPfZrAXUWUfTIZ5ECEUEJaijtw= +github.com/lestrrat-go/dsig v1.0.0/go.mod h1:dEgoOYYEJvW6XGbLasr8TFcAxoWrKlbQvmJgCR0qkDo= +github.com/lestrrat-go/dsig-secp256k1 v1.0.0/go.mod h1:CxUgAhssb8FToqbL8NjSPoGQlnO4w3LG1P0qPWQm/NU= +github.com/lestrrat-go/httpcc v1.0.1/go.mod h1:qiltp3Mt56+55GPVCbTdM9MlqhvzyuL6W/NMDA8vA5E= +github.com/lestrrat-go/httprc/v3 v3.0.1/go.mod h1:2uAvmbXE4Xq8kAUjVrZOq1tZVYYYs5iP62Cmtru00xk= +github.com/lestrrat-go/jwx/v3 v3.0.12/go.mod h1:HiUSaNmMLXgZ08OmGBaPVvoZQgJVOQphSrGr5zMamS8= +github.com/lestrrat-go/option v1.0.1/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= +github.com/lestrrat-go/option/v2 v2.0.0/go.mod h1:oSySsmzMoR0iRzCDCaUfsCzxQHUEuhOViQObyy7S6Vg= +github.com/letsencrypt/borp v0.0.0-20240620175310-a78493c6e2bd/go.mod h1:gMSMCNKhxox/ccR923EJsIvHeVVYfCABGbirqa0EwuM= +github.com/letsencrypt/challtestsrv v1.3.3/go.mod h1:Ur4e4FvELUXLGhkMztHOsPIsvGxD/kzSJninOrkM+zc= +github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= +github.com/letsencrypt/validator/v10 v10.0.0-20230215210743-a0c7dfc17158/go.mod h1:ZFNBS3H6OEsprCRjscty6GCBe5ZiX44x6qY4s7+bDX0= +github.com/lufeee/execinquery v1.2.1/go.mod h1:EC7DrEKView09ocscGHC+apXMIaorh4xqSxS/dy8SbM= +github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk= +github.com/macabu/inamedparam v0.1.3/go.mod h1:93FLICAIk/quk7eaPPQvbzihUdn/QkGDwIZEoLtpH6I= +github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/maratori/testableexamples v1.0.0/go.mod h1:4rhjL1n20TUTT4vdh3RDqSizKLyXp7K2u6HgraZCGzE= +github.com/maratori/testpackage v1.1.1/go.mod h1:s4gRK/ym6AMrqpOa/kEbQTV4Q4jb7WeLZzVhVVVOQMc= +github.com/matoous/godox v0.0.0-20230222163458-006bad1f9d26/go.mod h1:1BELzlh859Sh1c6+90blK8lbYy0kwQf1bYlBhBysy1s= +github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.12.1/go.mod h1:RuJdxo0oI6dClIaMzdl3hewq3a065RH65dofJP03h8I= +github.com/mgechev/revive v1.3.7/go.mod h1:RJ16jUbF0OWC3co/+XTxmFNgEpUPwnnA0BRllX2aDNA= +github.com/microsoft/go-mssqldb v1.6.0/go.mod h1:00mDtPbeQCRGC1HwOOR5K/gr30P1NcEG0vx6Kbv2aJU= +github.com/mistifyio/go-zfs/v3 v3.0.1/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= +github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= +github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo= +github.com/moby/sys/signal v0.7.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg= +github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow= +github.com/moricho/tparallel v0.3.1/go.mod h1:leENX2cUv7Sv2qDgdi0D0fCftN8fRC67Bcn8pqzeYNI= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= +github.com/nakabonne/nestif v0.3.1/go.mod h1:9EtoZochLn5iUprVDmDjqGKPofoUEBL8U4Ngq6aY7OE= +github.com/nelsam/hel/v2 v2.3.3/go.mod h1:1ZTGfU2PFTOd5mx22i5O0Lc2GY933lQ2wb/ggy+rL3w= +github.com/nishanths/exhaustive v0.12.0/go.mod h1:mEZ95wPIZW+x8kC4TgC+9YCUgiST7ecevsVDTgc2obs= +github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c= +github.com/nunnatsa/ginkgolinter v0.16.2/go.mod h1:4tWRinDN1FeJgU+iJANW/kz7xKN5nYRAOfJDQUS9dOQ= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/oleiade/reflections v1.1.0/go.mod h1:mCxx0QseeVCHs5Um5HhJeCKVC7AwS8kO67tky4rdisA= +github.com/olekukonko/ts v0.0.0-20171002115256-78ecb04241c0/go.mod h1:F/7q8/HZz+TXjlsoZQQKVYvXTZaFH4QRa3y+j1p7MS0= +github.com/open-policy-agent/opa v1.12.1/go.mod h1:RnDgm04GA1RjEXJvrsG9uNT/+FyBNmozcPvA2qz60M4= +github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opencontainers/runtime-tools v0.9.1-0.20221107090550-2e043c6bd626/go.mod h1:BRHJJd0E+cx42OybVYSgUvZmU0B8P9gZuRXlZUP7TKI= +github.com/opencontainers/selinux v1.13.1/go.mod h1:S10WXZ/osk2kWOYKy1x2f/eXF5ZHJoUs8UU/2caNRbg= +github.com/openshift/build-machinery-go v0.0.0-20250530140348-dc5b2804eeee/go.mod h1:8jcm8UPtg2mCAsxfqKil1xrmRMI3a+XU2TZ9fF8A7TE= +github.com/openshift/library-go v0.0.0-20250129210218-fe56c2cf5d70/go.mod h1:TQx0VEhZ/92qRXIMDu2Wg4bUPmw5HRNE6wpSZ+IsP0Y= +github.com/openshift/machine-config-operator v0.0.1-0.20250401081735-9026ff2d802e/go.mod h1:wmBAHvqHXXSFa0yz3scg0RZLxcs5B51ZTeaVlCSPaDk= +github.com/operator-framework/api v0.29.0/go.mod h1:0whQE4mpMDd2zyHkQe+bFa3DLoRs6oGWCbu8dY/3pyc= +github.com/owenrumney/go-sarif v1.1.1 h1:QNObu6YX1igyFKhdzd7vgzmw7XsWN3/6NMGuDzBgXmE= +github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/polyfloyd/go-errorlint v1.5.2/go.mod h1:sH1QC1pxxi0fFecsVIzBmxtrgd9IF/SkJpA6wqyKAJs= +github.com/pquerna/cachecontrol v0.1.0/go.mod h1:NrUG3Z7Rdu85UNR3vm7SOsl1nFIeSiQnrHV5K9mBcUI= +github.com/proglottis/gpgme v0.1.4/go.mod h1:5LoXMgpE4bttgwwdv9bLs/vwqv3qV7F4glEEZ7mRKrM= +github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.79.2/go.mod h1:AVMP4QEW8xuGWnxaWSpI3kKjP9fDA31nO68zsyREJZA= +github.com/prometheus-operator/prometheus-operator/pkg/client v0.79.2/go.mod h1:671/KciyzKiTmvIYTpp7CzWD1/TNXVPgeDLJcGFWrOM= +github.com/prometheus/alertmanager v0.27.0/go.mod h1:8Ia/R3urPmbzJ8OsdvmZvIprDwvwmYCmUbwBL+jlPOE= +github.com/prometheus/prometheus v0.301.0/go.mod h1:BJLjWCKNfRfjp7Q48DrAjARnCi7GhfUVvUFEAWTssZM= +github.com/prometheus/sigv4 v0.1.0/go.mod h1:doosPW9dOitMzYe2I2BN0jZqUuBrGPbXrNsTScN18iU= +github.com/protocolbuffers/txtpbfmt v0.0.0-20251016062345-16587c79cd91/go.mod h1:JSbkp0BviKovYYt9XunS95M3mLPibE9bGg+Y95DsEEY= +github.com/pseudomuto/protoc-gen-doc v1.5.1/go.mod h1:XpMKYg6zkcpgfpCfQ8GcWBDRtRxOmMR5w7pz4Xo+dYM= +github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= +github.com/quasilyte/go-ruleguard v0.4.2/go.mod h1:GJLgqsLeo4qgavUoL8JeGFNS7qcisx3awV/w9eWTmNI= +github.com/quasilyte/go-ruleguard/dsl v0.3.22/go.mod h1:KeCP03KrjuSO0H1kTuZQCWlQPulDV6YMIXmpQss17rU= +github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= +github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727/go.mod h1:rlzQ04UMyJXu/aOvhd8qT+hvDrFpiwqp8MRXDY9szc0= +github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/rcrowley/go-metrics v0.0.0-20250401214520-65e299d6c5c9/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= +github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= +github.com/ryancurrah/gomodguard v1.3.2/go.mod h1:LqdemiFomEjcxOqirbQCb3JFvSxH2JUYMerTFd3sF2o= +github.com/ryanrolds/sqlclosecheck v0.5.1/go.mod h1:2g3dUjoS6AL4huFdv6wn55WpLIDjY7ZgUR4J8HOO/XQ= +github.com/sanposhiho/wastedassign/v2 v2.0.7/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI= +github.com/santhosh-tekuri/jsonschema/v5 v5.3.1/go.mod h1:uToXkOrWAZ6/Oc07xWQrPOhJotwFIyu2bBVN41fcDUY= +github.com/sashamelentyev/interfacebloat v1.1.0/go.mod h1:+Y9yU5YdTkrNvoX0xHc84dxiN1iBi9+G8zZIhPVoNjQ= +github.com/sashamelentyev/usestdlibvars v1.26.0/go.mod h1:9nl0jgOfHKWNFS43Ojw0i7aRoS4j6EBye3YBhmAIRF8= +github.com/securego/gosec/v2 v2.22.0/go.mod h1:sR5n3LzZ/52rn4xxRBJk38iPe/hjiA0CkVcyiAHNCrM= +github.com/segmentio/asm v1.2.1/go.mod h1:BqMnlJP91P8d+4ibuonYZw9mfnzI9HfxselHZr5aAcs= +github.com/segmentio/conf v1.2.0/go.mod h1:Y3B9O/PqqWqjyxyWWseyj/quPEtMu1zDp/kVbSWWaB0= +github.com/segmentio/ksuid v1.0.4/go.mod h1:/XUiZBD3kVx5SmUOl55voK5yeAbBNNIed+2O73XgrPE= +github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs= +github.com/sigstore/rekor-tiles v0.1.11 h1:0NAJ2EhD1r6DH95FUuDTqUDd+c31LSKzoXGW5ZCzFq0= +github.com/sigstore/rekor-tiles v0.1.11/go.mod h1:eGIeqASh52pgWpmp/j5KZDjmKdVwob7eTYskVVRCu5k= +github.com/sigstore/timestamp-authority v1.2.9 h1:L9Fj070/EbMC8qUk8BchkrYCS1BT5i93Bl6McwydkFs= +github.com/sigstore/timestamp-authority v1.2.9/go.mod h1:QyRnZchz4o+xdHyK5rvCWacCHxWmpX+mgvJwB1OXcLY= +github.com/sivchari/containedctx v1.0.3/go.mod h1:c1RDvCbnJLtH4lLcYD/GqwiBSSf4F5Qk0xld2rBqzJ4= +github.com/sivchari/tenv v1.7.1/go.mod h1:64yStXKSOxDfX47NlhVwND4dHwfZDdbp2Lyl018Icvg= +github.com/smallstep/pkcs7 v0.1.1/go.mod h1:dL6j5AIz9GHjVEBTXtW+QliALcgM19RtXaTeyxI+AfA= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/sonatard/noctx v0.0.2/go.mod h1:kzFz+CzWSjQ2OzIm46uJZoXuBpa2+0y3T36U18dWqIo= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= +github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I= +github.com/ssor/bom v0.0.0-20170718123548-6386211fdfcf/go.mod h1:RJID2RhlZKId02nZ62WenDCkgHFerpIOmW0iT7GKmXM= +github.com/stbenjam/no-sprintf-host-port v0.1.1/go.mod h1:TLhvtIvONRzdmkFiio4O8LHsN9N74I+PhRquPsxpL0I= +github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= +github.com/sylabs/sif/v2 v2.21.1/go.mod h1:YoqEGQnb5x/ItV653bawXHZJOXQaEWpGwHsSD3YePJI= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/t-yuki/gocover-cobertura v0.0.0-20180217150009-aaee18c8195c/go.mod h1:SbErYREK7xXdsRiigaQiQkI9McGRzYMvlKYaP3Nimdk= +github.com/tchap/go-patricia/v2 v2.3.3/go.mod h1:VZRHKAb53DLaG+nA9EaYYiaEx6YztwDlLElMsnSHD4k= +github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= +github.com/tetafro/godot v1.4.16/go.mod h1:2oVxTBSftRTh4+MVfUaUXR6bn2GDXCaMcOG4Dk3rfio= +github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966/go.mod h1:27bSVNWSBOHm+qRp1T9qzaIpsWEP6TbUnei/43HK+PQ= +github.com/timonwong/loggercheck v0.9.4/go.mod h1:caz4zlPcgvpEkXgVnAJGowHAMW2NwHaNlpS8xDbVhTg= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= +github.com/tomarrell/wrapcheck/v2 v2.8.3/go.mod h1:g9vNIyhb5/9TQgumxQyOEqDHsmGYcGsVMOx/xGkqdMo= +github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= +github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw= +github.com/transparency-dev/tessera v1.0.1-0.20251104110637-ba6c65c4ae73/go.mod h1:hxs+XmMCxM44pskCyfRFhEuUkpETNcfl6fTNOFsh7O8= +github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4= +github.com/ultraware/whitespace v0.1.1/go.mod h1:XcP1RLD81eV4BW8UhQlpaR+SDc2givTvyI8a586WjW8= +github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po= +github.com/urfave/cli/v2 v2.27.6/go.mod h1:3Sevf16NykTbInEnD0yKkjDAeZDS0A6bzhBH5hrMvTQ= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/uudashr/gocognit v1.1.2/go.mod h1:aAVdLURqcanke8h3vg35BC++eseDm66Z7KmchI5et4k= +github.com/uwu-tools/magex v0.10.1/go.mod h1:5uQvmocqEueCbgK4Dm67mIfhjq80o408F17J6867go8= +github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= +github.com/vbauerster/mpb/v8 v8.10.2/go.mod h1:+Ja4P92E3/CorSZgfDtK46D7AVbDqmBQRTmyTqPElo0= +github.com/vektah/gqlparser/v2 v2.5.31/go.mod h1:c1I28gSOVNzlfc4WuDlqU7voQnsqI6OG2amkBAFmgts= +github.com/veraison/go-cose v1.3.0/go.mod h1:df09OV91aHoQWLmy1KsDdYiagtXgyAwAl8vFeFn1gMc= +github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= +github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/withfig/autocomplete-tools/integrations/cobra v1.2.1/go.mod h1:nmuySobZb4kFgFy6BptpXp/BBw+xFSyvVPP6auoJB4k= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xen0n/gosmopolitan v1.2.2/go.mod h1:7XX7Mj61uLYrj0qmeN0zi7XDon9JRAEhYQqAPLVNTeg= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= +github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= +github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk= +github.com/yashtewari/glob-intersection v0.2.0/go.mod h1:LK7pIC3piUjovexikBbJ26Yml7g8xa5bsjfx2v1fwok= +github.com/yeya24/promlinter v0.3.0/go.mod h1:cDfJQQYv9uYciW60QT0eeHlFodotkYZlL+YcPQN+mW4= +github.com/ykadowak/zerologlint v0.1.5/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg= +github.com/yvasiyarov/go-metrics v0.0.0-20150112132944-c25f46c4b940/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.7/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20160601141957-9c099fbc30e9/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +gitlab.com/bosi/decorder v0.4.2/go.mod h1:muuhHoaJkA9QLcYHq4Mj8FJUwDZ+EirSHRiaTcTf6T8= +go-simpler.org/musttag v0.12.2/go.mod h1:uN1DVIasMTQKk6XSik7yrJoEysGtR2GRqvWnI9S7TYM= +go-simpler.org/sloglint v0.7.1/go.mod h1:OlaVDRh/FKKd4X4sIMbsz8st97vomydceL146Fthh/c= +go.etcd.io/etcd/api/v3 v3.6.8/go.mod h1:qyQj1HZPUV3B5cbAL8scG62+fyz5dSxxu0w8pn28N6Q= +go.etcd.io/etcd/client/pkg/v3 v3.6.8/go.mod h1:GsiTRUZE2318PggZkAo6sWb6l8JLVrnckTNfbG8PWtw= +go.etcd.io/etcd/client/v3 v3.6.8/go.mod h1:MVG4BpSIuumPi+ELF7wYtySETmoTWBHVcDoHdVupwt8= +go.etcd.io/etcd/etcdctl/v3 v3.6.8/go.mod h1:8X8SvxOc5kPQ0e+jbSx3RgKzTNQ3O8rBuQEoDKuQFX0= +go.etcd.io/etcd/etcdutl/v3 v3.6.8/go.mod h1:HGfpMG6Sjo9S6KWeXctiYcN8LjLbbUBdAjCYb8V977w= +go.etcd.io/etcd/pkg/v3 v3.6.8/go.mod h1:TRibVNe+FqJIe1abOAA1PsuQ4wqO87ZaOoprg09Tn8c= +go.etcd.io/etcd/server/v3 v3.6.8/go.mod h1:88dCtwUnSirkUoJbflQxxWXqtBSZa6lSG0Kuej+dois= +go.etcd.io/etcd/tests/v3 v3.6.8/go.mod h1:U1ioDy7TXzz2UXhSQfbJ3++PsryNwiniHtdbXZPprX0= +go.etcd.io/etcd/v3 v3.6.8/go.mod h1:syLTueu7AV0Pw/TcOTHEeWOtcAD/xFnnXB0gukO92Vc= +go.etcd.io/gofail v0.2.0/go.mod h1:nL3ILMGfkXTekKI3clMBNazKnjUZjYLKmBHzsVAnC1o= +go.etcd.io/raft/v3 v3.6.0/go.mod h1:nLvLevg6+xrVtHUmVaTcTz603gQPHfh7kUAwV6YpfGo= +go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/collector/pdata v1.22.0/go.mod h1:nLLf6uDg8Kn5g3WNZwGyu8+kf77SwOqQvMTb5AXEbEY= +go.opentelemetry.io/collector/semconv v0.116.0/go.mod h1:N6XE8Q0JKgBN2fAhkUQtqK9LT7rEGR6+Wu/Rtbal1iI= +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.58.0/go.mod h1:uosvgpqTcTXtcPQORTbEkZNDQTCDOgTz1fe6aLSyqrQ= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= +go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/ratelimit v0.3.1/go.mod h1:6euWsTB6U/Nb3X++xEUXA8ciPJvr19Q/0h1+oDcJhRk= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go4.org v0.0.0-20200104003542-c7e774b10ea0/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= +goa.design/goa/v3 v3.23.4/go.mod h1:da3W585WfJe9gT+hJCbP8YFB9yc4gmuCwB0MvkbwhXk= +gocloud.dev v0.45.0/go.mod h1:0kXKmkCLG6d31N7NyLZWzt7jDSQura9zD/mWgiB6THI= +golang.org/x/exp/typeparams v0.0.0-20240314144324-c7f7c6466f7f/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/perf v0.0.0-20230113213139-801c7ef9e5c5/go.mod h1:UBKtEnL8aqnd+0JHqZ+2qoMDwtuy6cYhhKNoHLBiTQc= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20260311193753-579e4da9a98c/go.mod h1:TpUTTEp9frx7rTdLpC9gFG9kdI7zVLFTFFlqaH2Cncw= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/tools v0.42.0/go.mod h1:Ma6lCIwGZvHK6XtgbswSoWroEkhugApmsXyrUmBhfr0= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20260311181403-84a4fc48630c/go.mod h1:9amqk/8LQWEC4RjyUxMx1DebyQ7hZB9gvl67bHmgZ2E= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.6.1/go.mod h1:YNKnb2OAApgYn2oYY47Rn7alMr1zWjb2U8Q0aoGWiNc= +google.golang.org/grpc/gcp/observability v1.0.1/go.mod h1:yM0UcrYRMe/B+Nu0mDXeTJNDyIMJRJnzuxqnJMz7Ewk= +google.golang.org/grpc/security/advancedtls v1.0.0/go.mod h1:o+s4go+e1PJ2AjuQMY5hU82W7lDlefjJA6FqEHRVHWk= +google.golang.org/grpc/stats/opencensus v1.0.0/go.mod h1:FhdkeYvN43wLYUnapVuRJJ9JXkNwe403iLUW2LKSnjs= +gopkg.in/dnaeon/go-vcr.v3 v3.2.0/go.mod h1:2IMOnnlx9I6u9x+YBsM3tAMx6AlOxnJ0pWxQAzZ79Ag= +gopkg.in/go-jose/go-jose.v2 v2.6.3/go.mod h1:zzZDPkNNw/c9IE7Z9jr11mBZQhKQTMzoEEIoEdZlFBI= +gorm.io/driver/sqlite v1.6.0/go.mod h1:AO9V1qIQddBESngQUKWL9yoH93HIeA1X6V633rBwyT8= +honnef.co/go/tools v0.4.7/go.mod h1:+rnGS1THNh8zMwnd2oVOTL9QF6vmfyG6ZXBULae2uc0= +k8s.io/cri-api v0.32.13/go.mod h1:DCzMuTh2padoinefWME0G678Mc3QFbLMF2vEweGzBAI= +k8s.io/gengo v0.0.0-20230829151522-9cce18d56c01/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog v0.2.0 h1:0ElL0OHzF3N+OhoJTL0uca20SxtYt4X4+bzHeqrB83c= +k8s.io/klog v0.2.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/kms v0.35.3/go.mod h1:VT+4ekZAdrZDMgShK37vvlyHUVhwI9t/9tvh0AyCWmQ= +k8s.io/kube-aggregator v0.32.1/go.mod h1:sXjL5T8FO/rlBzTbBhahw9V5Nnr1UtzZHKTj9WxQCOU= +k8s.io/metrics v0.35.3/go.mod h1:/O8UBb5QVyAekR2QvL/WWxskpdV1wVSEl4MSLAy4Ql4= +k8s.io/pod-security-admission v0.32.1/go.mod h1:psSkvN+noAracLrouPjVDID/7TiMWoHQLNoBTVCY/nw= +mvdan.cc/gofumpt v0.6.0/go.mod h1:4L0wf+kgIPZtcCWXynNS2e6bhmj73umwnuXSZarixzA= +mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ= +nhooyr.io/websocket v1.8.11/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= +open-cluster-management.io/api v0.15.0/go.mod h1:9erZEWEn4bEqh0nIX2wA7f/s3KCuFycQdBrPrRzi0QM= +oras.land/oras-go v1.2.5 h1:XpYuAwAb0DfQsunIyMfeET92emK8km3W4yEzZvUbsTo= +oras.land/oras-go v1.2.5/go.mod h1:PuAwRShRZCsZb7g8Ar3jKKQR/2A/qN+pkYxIOd/FAoo= +sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.14.3/go.mod h1:BgHrVkRmx7iWCumslrUpxE6BX474IrMXc+7R0RpV+E8= +sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96/go.mod h1:EOBQyBowOUsd7U4CJnMHNE0ri+zCXyouGdLwC/jZU+I= +sigs.k8s.io/kustomize/kustomize/v5 v5.7.1/go.mod h1:+5/SrBcJ4agx1SJknGuR/c9thwRSKLxnKoI5BzXFaLU= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +tags.cncf.io/container-device-interface v0.8.1/go.mod h1:Apb7N4VdILW0EVdEMRYXIDVRZfNJZ+kmEUss2kRRQ6Y= +tags.cncf.io/container-device-interface/specs-go v0.8.0/go.mod h1:BhJIkjjPh4qpys+qm4DAYtUyryaTDg9zris+AczXyws= diff --git a/scripts/compliance-operator-importer/.dockerignore b/scripts/compliance-operator-importer/.dockerignore new file mode 100644 index 0000000000000..c008207f5a26d --- /dev/null +++ b/scripts/compliance-operator-importer/.dockerignore @@ -0,0 +1,2 @@ +* +!compliance-operator-importer diff --git a/scripts/compliance-operator-importer/.gitignore b/scripts/compliance-operator-importer/.gitignore new file mode 100644 index 0000000000000..4deffd5466199 --- /dev/null +++ b/scripts/compliance-operator-importer/.gitignore @@ -0,0 +1,2 @@ +bin/ +/compliance-operator-importer diff --git a/scripts/compliance-operator-importer/DECISIONS.md b/scripts/compliance-operator-importer/DECISIONS.md new file mode 100644 index 0000000000000..cd23fb693a5c1 --- /dev/null +++ b/scripts/compliance-operator-importer/DECISIONS.md @@ -0,0 +1,56 @@ +# V1 Scope Freeze: CO -> ACS Importer + +## Status + +This document freezes Phase 1 behavior. Any deviation requires updating this file and corresponding specs. + +## Frozen decisions + +1. **Execution model** + - Standalone external importer only. + - No runtime/product code changes in Sensor/Central/ACS backend. + +2. **Importer mode** + - Phase 1 is create-only. + - Importer may create new ACS scan configs. + - Importer must never update existing ACS scan configs. + +3. **Implementation language** + - Use **Go** for Phase 1 implementation. + - Do not implement Phase 1 importer in bash/shell. + - Python is an acceptable future alternative only if explicitly re-decided in this file. + +4. **Existing-name behavior** + - If `scanName` already exists in ACS, skip resource. + - Add one entry to `problems[]` with clear `description` and `fixHint`. + +5. **Error handling model** + - Resource-level issue => skip resource, continue processing, emit `problems[]` entry. + - Fatal preflight/config issue => fail run before resource processing. + +6. **Cluster targeting** + - Source cluster selected like `kubectl` (current context by default, optional context override). + - Single destination ACS cluster ID via `--acs-cluster-id`. + +7. **ACS authentication model** + - Default auth mode is token (`ACS_API_TOKEN` via `--acs-token-env`). + - Optional basic-auth mode is allowed for local/dev environments. + - Basic mode uses username/password inputs and the same preflight endpoint checks. + +8. **Profile kind fallback** + - Missing `ScanSettingBinding.profiles[].kind` defaults to `Profile` (profiles is a top-level field, not under spec). + +9. **Schedule conversion** + - Convert valid CO cron to ACS schedule fields. + - Conversion failure => skip resource + `problems[]` entry with remediation hint. + +10. **Provenance marker** + +- Not required in Phase 1 create-only mode. +- Can be revisited in a future update/reconcile phase. + +## Deferred to Phase 2 (out of scope) + +- Update/reconcile mode (`PUT`) for existing configs. +- Ownership/provenance-based update guard. +- Multi-target cluster mapping per binding. diff --git a/scripts/compliance-operator-importer/Dockerfile b/scripts/compliance-operator-importer/Dockerfile new file mode 100644 index 0000000000000..6ec55de4a1c4b --- /dev/null +++ b/scripts/compliance-operator-importer/Dockerfile @@ -0,0 +1,10 @@ +FROM registry.access.redhat.com/ubi9-micro:latest + +LABEL org.opencontainers.image.title="co-acs-importer" \ + org.opencontainers.image.description="Compliance Operator to ACS scan configuration importer" \ + org.opencontainers.image.source="https://github.com/stackrox/stackrox" + +COPY compliance-operator-importer /compliance-operator-importer + +USER 65534:65534 +ENTRYPOINT ["/compliance-operator-importer"] diff --git a/scripts/compliance-operator-importer/Makefile b/scripts/compliance-operator-importer/Makefile new file mode 100644 index 0000000000000..52678ea31f499 --- /dev/null +++ b/scripts/compliance-operator-importer/Makefile @@ -0,0 +1,70 @@ +BINARY := compliance-operator-importer +MODULE := ./cmd/importer +IMAGE ?= localhost/compliance-operator-importer +TAG ?= latest +ARCHS ?= amd64 arm64 +GOARCH ?= $(shell go env GOARCH) +CONTAINER ?= $(shell command -v podman 2>/dev/null || echo docker) + +.PHONY: build test image image-push image-multiarch clean \ + demo-seed demo-seed-down demo-seed-status demo help test-v + +## ── Build ────────────────────────────────────────────────────────────────── + +build: ## Build the importer binary + CGO_ENABLED=0 go build -o $(BINARY) $(MODULE) + +## ── Test ─────────────────────────────────────────────────────────────────── + +test: ## Run all unit tests + go test ./... + +test-v: ## Run all unit tests (verbose) + go test -v ./... + +## ── Container image ──────────────────────────────────────────────────────── + +image: ## Build container image for host arch + CGO_ENABLED=0 GOOS=linux GOARCH=$(GOARCH) go build -o $(BINARY) $(MODULE) + $(CONTAINER) build -t $(IMAGE):$(TAG) . + +image-multiarch: ## Build per-arch images (use before image-push) + @for arch in $(ARCHS); do \ + echo "── Building $(IMAGE):$(TAG)-$$arch ──"; \ + CGO_ENABLED=0 GOOS=linux GOARCH=$$arch go build -o $(BINARY) $(MODULE) && \ + $(CONTAINER) build --platform linux/$$arch -t $(IMAGE):$(TAG)-$$arch . ; \ + done + +image-push: image-multiarch ## Push multi-arch images and create manifest + @for arch in $(ARCHS); do \ + $(CONTAINER) push $(IMAGE):$(TAG)-$$arch ; \ + done + $(CONTAINER) manifest rm $(IMAGE):$(TAG) 2>/dev/null || true + $(CONTAINER) manifest create $(IMAGE):$(TAG) \ + $(foreach arch,$(ARCHS),$(IMAGE):$(TAG)-$(arch)) + $(CONTAINER) manifest push $(IMAGE):$(TAG) + @echo "" + @echo "Pushed: $(IMAGE):$(TAG)" + +## ── Demo ─────────────────────────────────────────────────────────────────── + +demo-seed: ## Seed demo fixtures (2 ACS scans + 3 SSBs, 1 conflicting) + ./hack/demo-seed.sh up + +demo-seed-down: ## Remove demo fixtures + ./hack/demo-seed.sh down + +demo-seed-status: ## Show demo fixture state + ./hack/demo-seed.sh status + +demo: build ## Run interactive demo + ./hack/demo.sh + +## ── Misc ─────────────────────────────────────────────────────────────────── + +clean: ## Remove build artifacts + rm -f $(BINARY) + +help: ## Show this help + @grep -E '^[a-zA-Z_-]+:.*##' $(MAKEFILE_LIST) | \ + awk 'BEGIN {FS = ":.*## "}; {printf " \033[36m%-18s\033[0m %s\n", $$1, $$2}' diff --git a/scripts/compliance-operator-importer/README.md b/scripts/compliance-operator-importer/README.md new file mode 100644 index 0000000000000..071691fba9cc3 --- /dev/null +++ b/scripts/compliance-operator-importer/README.md @@ -0,0 +1,152 @@ +# CO → ACS Scheduled Scan Importer + +Reads Compliance Operator `ScanSettingBinding` resources from one or more +Kubernetes clusters and creates equivalent scan configurations in Red Hat +Advanced Cluster Security (ACS) via the v2 API. + +## Quick start + +```bash +# Build +go build -o compliance-operator-importer ./cmd/importer + +# Dry run (preview, no changes) +ROX_API_TOKEN= ./compliance-operator-importer \ + --endpoint central.example.com \ + --dry-run + +# Import for real +ROX_API_TOKEN= ./compliance-operator-importer \ + --endpoint central.example.com +``` + +## Authentication + +Auth mode is auto-inferred from environment variables: + +| Variable | Mode | Typical use | +|----------|------|-------------| +| `ROX_API_TOKEN` | API token (Bearer) | Production | +| `ROX_ADMIN_PASSWORD` | Basic auth | Development/testing | + +Setting both is an error. Setting neither is an error. + +For basic auth the username defaults to `admin`; override with `--username` +or `ROX_ADMIN_USER`. + +## Multi-cluster + +By default all contexts in the merged kubeconfig are processed. Merge +multiple kubeconfig files via the standard `KUBECONFIG` variable: + +```bash +KUBECONFIG=cluster-a.yaml:cluster-b.yaml ./compliance-operator-importer --endpoint central.example.com +``` + +Use `--context` (repeatable) to limit processing to specific contexts: + +```bash +./compliance-operator-importer --endpoint central.example.com \ + --context prod-east \ + --context prod-west +``` + +When the same `ScanSettingBinding` name appears across multiple clusters, +the importer merges them into a single ACS scan configuration targeting all +matched clusters (profiles and schedules must match). + +## Cluster ID auto-discovery + +The ACS cluster ID for each context is auto-discovered using the first +successful method: + +1. `admission-control` ConfigMap → `cluster-id` key (namespace: `stackrox`) +2. OpenShift `ClusterVersion` `spec.clusterID` → matched against ACS provider metadata +3. `helm-effective-cluster-name` Secret → matched against ACS cluster name + +## Flags + +| Flag | Default | Description | +|------|---------|-------------| +| `--endpoint` | `ROX_ENDPOINT` | ACS Central URL (bare hostnames get `https://` prepended) | +| `--username` | `admin` | Basic auth username (`ROX_ADMIN_USER`) | +| `--context` | all | Kubeconfig context to process (repeatable) | +| `--co-namespace` | `openshift-compliance` | Namespace for CO resources | +| `--co-all-namespaces` | `false` | Read CO resources from all namespaces | +| `--dry-run` | `false` | Preview actions without changes | +| `--overwrite-existing` | `false` | Update existing ACS configs instead of skipping | +| `--report-json` | — | Write structured JSON report to file | +| `--max-retries` | `5` | Retry attempts for transient API errors (429, 502–504) | +| `--request-timeout` | `30s` | Per-request HTTP timeout | +| `--ca-cert-file` | — | PEM CA bundle for TLS | +| `--insecure-skip-verify` | `false` | Skip TLS verification | + +## Behaviour + +- **Create-only (default):** existing ACS scan configs with the same name + are skipped with a warning. +- **Overwrite mode** (`--overwrite-existing`): existing configs are updated + via PUT to match the cluster SSBs. +- **Idempotent:** re-running produces the same result; no duplicates. +- **Dry run:** all discovery and mapping runs normally; no POST/PUT issued. + +## Exit codes + +| Code | Meaning | +|------|---------| +| `0` | All bindings processed (or nothing to do) | +| `1` | Fatal error (config, auth, connectivity) | +| `2` | Partial success (some bindings failed; see report) | + +## Mapping rules + +Each `ScanSettingBinding` maps to one ACS scan configuration: + +| ACS field | Source | +|-----------|--------| +| `scanName` | `ScanSettingBinding.metadata.name` | +| `profiles` | Sorted, deduplicated profile names from the binding | +| `scanSchedule` | Converted from the referenced `ScanSetting.schedule` (cron) | +| `clusters` | Auto-discovered ACS cluster ID(s) | +| `description` | `"Imported from CO ScanSettingBinding / (ScanSetting: )"` | + +Supported cron patterns: daily (`M H * * *`), weekly (`M H * * DOW`), +monthly (`M H DOM * *`). Step and range notation are not supported. + +## JSON report + +When `--report-json` is set, a structured report is written: + +```json +{ + "meta": { "timestamp": "...", "dryRun": false, "mode": "create-only" }, + "counts": { "discovered": 3, "create": 2, "update": 0, "skip": 1, "failed": 0 }, + "items": [ { "source": {...}, "action": "create", ... } ], + "problems": [] +} +``` + +## Demo / testing + +Seed demo fixtures (2 ACS scans + 3 SSBs, 1 conflicting): + +```bash +ROX_ADMIN_PASSWORD=admin ROX_ENDPOINT=central.example.com ./hack/demo-seed.sh up +./hack/demo-seed.sh status +./hack/demo-seed.sh down +``` + +Interactive walkthrough: + +```bash +ROX_ADMIN_PASSWORD=admin ROX_ENDPOINT=central.example.com ./hack/demo.sh +``` + +## Development + +Specs live in `specs/` and are the source of truth. Tests reference spec IDs +(`IMP-*`). Run tests: + +```bash +go test ./... +``` diff --git a/scripts/compliance-operator-importer/cmd/importer/main.go b/scripts/compliance-operator-importer/cmd/importer/main.go new file mode 100644 index 0000000000000..9b59d0c6ba9a3 --- /dev/null +++ b/scripts/compliance-operator-importer/cmd/importer/main.go @@ -0,0 +1,71 @@ +// Binary co-acs-scan-importer reads Compliance Operator ScanSettingBinding +// resources from Kubernetes clusters and creates equivalent ACS compliance +// scan configurations through the ACS v2 API. +// +// Run with --help for full usage information and examples. +package main + +import ( + "context" + "fmt" + "os" + + "github.com/stackrox/co-acs-importer/internal/acs" + "github.com/stackrox/co-acs-importer/internal/config" + "github.com/stackrox/co-acs-importer/internal/preflight" + "github.com/stackrox/co-acs-importer/internal/run" + "github.com/stackrox/co-acs-importer/internal/status" +) + +func main() { + os.Exit(mainWithCode()) +} + +func mainWithCode() int { + cfg, err := config.ParseAndValidate(os.Args[1:]) + if err != nil { + if err == config.ErrHelpRequested { + return 0 + } + fmt.Fprintf(os.Stderr, "ERROR: %v\n", err) + return run.ExitFatalError + } + + s := status.New() + ctx := context.Background() + + // Preflight check before any resource processing. + s.Stage("Preflight", "checking ACS connectivity and credentials") + if err := preflight.Run(ctx, cfg); err != nil { + s.Failf("%v", err) + return run.ExitFatalError + } + s.OKf("ACS endpoint is reachable at %s", cfg.ACSEndpoint) + + acsClient, err := acs.NewClient(cfg) + if err != nil { + s.Failf("failed to create ACS client: %v", err) + return run.ExitFatalError + } + + // Build cluster sources from kubeconfig contexts. + if len(cfg.Contexts) > 0 { + s.Stagef("Discovery", "resolving %d specified contexts", len(cfg.Contexts)) + } else { + s.Stage("Discovery", "resolving all kubeconfig contexts") + } + sources, err := run.BuildClusterSources(ctx, cfg, acsClient) + if err != nil { + s.Failf("%v", err) + return run.ExitFatalError + } + for _, src := range sources { + s.OKf("%s → %s", src.Label, src.ACSClusterID) + } + + if len(sources) == 1 { + cfg.ACSClusterID = sources[0].ACSClusterID + return run.NewRunner(cfg, acsClient, sources[0].COClient).Run(ctx) + } + return run.NewRunner(cfg, acsClient, nil).RunMultiCluster(ctx, sources) +} diff --git a/scripts/compliance-operator-importer/e2e/doc.go b/scripts/compliance-operator-importer/e2e/doc.go new file mode 100644 index 0000000000000..0680e78e92b3c --- /dev/null +++ b/scripts/compliance-operator-importer/e2e/doc.go @@ -0,0 +1,3 @@ +// Package e2e contains end-to-end acceptance tests that run against a real +// ACS + Compliance Operator cluster. Tests require the "e2e" build tag. +package e2e diff --git a/scripts/compliance-operator-importer/e2e/e2e_test.go b/scripts/compliance-operator-importer/e2e/e2e_test.go new file mode 100644 index 0000000000000..5ac5dd5aad081 --- /dev/null +++ b/scripts/compliance-operator-importer/e2e/e2e_test.go @@ -0,0 +1,611 @@ +//go:build e2e + +// Package e2e runs acceptance tests against a real ACS + Compliance Operator +// cluster. These tests exercise the importer binary end-to-end. +// +// Required environment: +// +// ROX_ENDPOINT ACS Central URL (bare hostname OK, https:// prepended) +// ROX_ADMIN_PASSWORD Basic auth password (or ROX_API_TOKEN for token auth) +// +// Optional: +// +// CO_NAMESPACE CO namespace (default: openshift-compliance) +// E2E_KEEP_CONFIGS Set to "1" to skip cleanup of created scan configs +// +// Run: +// +// go test -tags e2e -v -count=1 ./e2e/ +// # or via the convenience wrapper: +// hack/run-e2e.sh +package e2e + +import ( + "bytes" + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io" + "net/http" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" +) + +// --------------------------------------------------------------------------- +// Global state set in TestMain +// --------------------------------------------------------------------------- + +var ( + importerBin string // path to compiled binary + endpoint string // ACS Central URL (with https://) + coNamespace string +) + +func TestMain(m *testing.M) { + endpoint = os.Getenv("ROX_ENDPOINT") + if endpoint == "" { + fmt.Fprintln(os.Stderr, "SKIP: ROX_ENDPOINT not set") + os.Exit(0) + } + if !strings.HasPrefix(endpoint, "https://") { + endpoint = "https://" + endpoint + } + + hasToken := os.Getenv("ROX_API_TOKEN") != "" + hasPassword := os.Getenv("ROX_ADMIN_PASSWORD") != "" + if !hasToken && !hasPassword { + fmt.Fprintln(os.Stderr, "SKIP: neither ROX_API_TOKEN nor ROX_ADMIN_PASSWORD set") + os.Exit(0) + } + + coNamespace = os.Getenv("CO_NAMESPACE") + if coNamespace == "" { + coNamespace = "openshift-compliance" + } + + // Build the importer binary. + tmpDir, err := os.MkdirTemp("", "co-importer-e2e-*") + if err != nil { + fmt.Fprintf(os.Stderr, "FAIL: mktemp: %v\n", err) + os.Exit(1) + } + importerBin = filepath.Join(tmpDir, "co-acs-scan-importer") + + cmd := exec.Command("go", "build", "-o", importerBin, "./cmd/importer/") + cmd.Dir = filepath.Join(mustGetwd(), "..") + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + if err := cmd.Run(); err != nil { + fmt.Fprintf(os.Stderr, "FAIL: build importer: %v\n", err) + os.Exit(1) + } + fmt.Fprintf(os.Stderr, "Built importer: %s\n", importerBin) + + code := m.Run() + + os.RemoveAll(tmpDir) + os.Exit(code) +} + +func mustGetwd() string { + d, err := os.Getwd() + if err != nil { + panic(err) + } + return d +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +// importerResult captures a single importer invocation. +type importerResult struct { + exitCode int + stdout string + stderr string + report *report // nil if no --report-json +} + +// report mirrors the JSON report structure (subset). +type report struct { + Meta struct { + DryRun bool `json:"dryRun"` + NamespaceScope string `json:"namespaceScope"` + Mode string `json:"mode"` + } `json:"meta"` + Counts struct { + Discovered int `json:"discovered"` + Create int `json:"create"` + Update int `json:"update"` + Skip int `json:"skip"` + Failed int `json:"failed"` + } `json:"counts"` + Items []reportItem `json:"items"` + Problems []problem `json:"problems"` +} + +type reportItem struct { + Source struct { + Namespace string `json:"namespace"` + BindingName string `json:"bindingName"` + ScanSettingName string `json:"scanSettingName"` + } `json:"source"` + Action string `json:"action"` + Reason string `json:"reason"` + Attempts int `json:"attempts"` + ACSScanConfigID string `json:"acsScanConfigId"` + Error string `json:"error"` +} + +type problem struct { + Severity string `json:"severity"` + Category string `json:"category"` + ResourceRef string `json:"resourceRef"` + Description string `json:"description"` + FixHint string `json:"fixHint"` + Skipped bool `json:"skipped"` +} + +// runImporter executes the importer binary with the given extra args. +// It always passes --endpoint, --insecure-skip-verify, and --co-namespace. +// If reportJSON is true, a temp file is used and the report is parsed. +func runImporter(t *testing.T, reportJSON bool, extraArgs ...string) importerResult { + t.Helper() + + args := []string{ + "--endpoint", endpoint, + "--insecure-skip-verify", + "--co-namespace", coNamespace, + } + args = append(args, extraArgs...) + + var reportPath string + if reportJSON { + f, err := os.CreateTemp("", "e2e-report-*.json") + if err != nil { + t.Fatalf("create temp report file: %v", err) + } + f.Close() + reportPath = f.Name() + t.Cleanup(func() { os.Remove(reportPath) }) + args = append(args, "--report-json", reportPath) + } + + cmd := exec.CommandContext(context.Background(), importerBin, args...) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + exitCode := 0 + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + exitCode = exitErr.ExitCode() + } else { + t.Fatalf("exec importer: %v", err) + } + } + + result := importerResult{ + exitCode: exitCode, + stdout: stdout.String(), + stderr: stderr.String(), + } + + if reportJSON && reportPath != "" { + data, err := os.ReadFile(reportPath) + if err == nil && len(data) > 0 { + var r report + if err := json.Unmarshal(data, &r); err != nil { + t.Logf("WARNING: report JSON parse error: %v", err) + } else { + result.report = &r + } + } + } + + return result +} + +// acsConfigSummary is a scan config from the ACS list API. +type acsConfigSummary struct { + ID string `json:"id"` + ScanName string `json:"scanName"` +} + +// acsListConfigs returns all scan configurations from ACS. +func acsListConfigs(t *testing.T) []acsConfigSummary { + t.Helper() + body := acsGet(t, "/v2/compliance/scan/configurations?pagination.limit=1000") + + var resp struct { + Configurations []acsConfigSummary `json:"configurations"` + } + if err := json.Unmarshal(body, &resp); err != nil { + t.Fatalf("parse ACS list response: %v", err) + } + return resp.Configurations +} + +// acsDeleteConfig deletes a scan config by ID. +func acsDeleteConfig(t *testing.T, id string) { + t.Helper() + req := acsRequest(t, http.MethodDelete, "/v2/compliance/scan/configurations/"+id, nil) + resp, err := acsHTTPClient().Do(req) + if err != nil { + t.Logf("WARNING: delete scan config %s: %v", id, err) + return + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + t.Logf("WARNING: delete scan config %s: HTTP %d", id, resp.StatusCode) + } +} + +// acsGet does a GET request to ACS and returns the body. +func acsGet(t *testing.T, path string) []byte { + t.Helper() + req := acsRequest(t, http.MethodGet, path, nil) + resp, err := acsHTTPClient().Do(req) + if err != nil { + t.Fatalf("ACS GET %s: %v", path, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + t.Fatalf("ACS GET %s: HTTP %d", path, resp.StatusCode) + } + body, err := io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("ACS GET %s: read body: %v", path, err) + } + return body +} + +func acsRequest(t *testing.T, method, path string, body io.Reader) *http.Request { + t.Helper() + url := endpoint + path + req, err := http.NewRequest(method, url, body) + if err != nil { + t.Fatalf("build ACS request: %v", err) + } + req.Header.Set("Accept", "application/json") + + if token := os.Getenv("ROX_API_TOKEN"); token != "" { + req.Header.Set("Authorization", "Bearer "+token) + } else { + user := os.Getenv("ROX_ADMIN_USER") + if user == "" { + user = "admin" + } + req.SetBasicAuth(user, os.Getenv("ROX_ADMIN_PASSWORD")) + } + return req +} + +func acsHTTPClient() *http.Client { + return &http.Client{ + Timeout: 30 * time.Second, + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec // e2e test + }, + } +} + +// configIDsByPrefix returns the IDs of scan configs whose name starts with prefix. +func configIDsByPrefix(t *testing.T, prefix string) []string { + t.Helper() + var ids []string + for _, c := range acsListConfigs(t) { + if strings.HasPrefix(c.ScanName, prefix) { + ids = append(ids, c.ID) + } + } + return ids +} + +// cleanupConfigsByPrefix deletes all scan configs matching prefix, unless +// E2E_KEEP_CONFIGS is set. +func cleanupConfigsByPrefix(t *testing.T, prefix string) { + if os.Getenv("E2E_KEEP_CONFIGS") == "1" { + t.Logf("E2E_KEEP_CONFIGS=1, skipping cleanup for prefix %q", prefix) + return + } + for _, id := range configIDsByPrefix(t, prefix) { + acsDeleteConfig(t, id) + t.Logf("cleaned up scan config %s", id) + } +} + +// scanConfigExists returns true if a scan config with the given name exists. +func scanConfigExists(t *testing.T, name string) bool { + t.Helper() + for _, c := range acsListConfigs(t) { + if c.ScanName == name { + return true + } + } + return false +} + +// countSSBs returns the number of ScanSettingBindings in CO_NAMESPACE. +func countSSBs(t *testing.T) int { + t.Helper() + cmd := exec.Command("kubectl", "get", "scansettingbindings.compliance.openshift.io", + "-n", coNamespace, "-o", "json") + out, err := cmd.Output() + if err != nil { + t.Fatalf("kubectl list SSBs: %v", err) + } + var list struct { + Items []json.RawMessage `json:"items"` + } + if err := json.Unmarshal(out, &list); err != nil { + t.Fatalf("parse SSB list: %v", err) + } + return len(list.Items) +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +// TestIMP_ACC_001_COResourcesListable verifies that CO resources can be listed +// from the target cluster. +func TestIMP_ACC_001_COResourcesListable(t *testing.T) { + for _, resource := range []string{ + "scansettingbindings.compliance.openshift.io", + "scansettings.compliance.openshift.io", + "profiles.compliance.openshift.io", + } { + t.Run(resource, func(t *testing.T) { + cmd := exec.Command("kubectl", "get", resource, "-n", coNamespace) + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("kubectl get %s failed: %v\n%s", resource, err, out) + } + }) + } +} + +// TestIMP_ACC_002_AuthPreflight verifies that the importer can authenticate +// with ACS (both preflight probe and actual listing work). +func TestIMP_ACC_002_AuthPreflight(t *testing.T) { + // Just verify the ACS API is reachable with current creds. + body := acsGet(t, "/v2/compliance/scan/configurations?pagination.limit=1") + if len(body) == 0 { + t.Fatal("empty response from ACS preflight probe") + } +} + +// TestIMP_ACC_003_DryRunNoWrites verifies that dry-run produces no changes. +func TestIMP_ACC_003_DryRunNoWrites(t *testing.T) { + // Snapshot existing configs. + before := acsListConfigs(t) + + result := runImporter(t, true, "--dry-run") + + if result.exitCode != 0 && result.exitCode != 2 { + t.Fatalf("IMP-ACC-003: expected exit code 0 or 2, got %d\nstdout: %s\nstderr: %s", + result.exitCode, result.stdout, result.stderr) + } + + if result.report == nil { + t.Fatal("IMP-ACC-003: expected report JSON to be written") + } + if !result.report.Meta.DryRun { + t.Error("IMP-ACC-003: report meta.dryRun should be true") + } + + // Verify no new configs were created. + after := acsListConfigs(t) + if len(after) != len(before) { + t.Errorf("IMP-ACC-003: config count changed from %d to %d during dry-run", + len(before), len(after)) + } +} + +// TestIMP_ACC_004_ApplyCreatesConfigs verifies that apply mode creates +// ACS scan configs for discovered SSBs. +func TestIMP_ACC_004_ApplyCreatesConfigs(t *testing.T) { + nSSBs := countSSBs(t) + if nSSBs == 0 { + t.Skip("no ScanSettingBindings found in namespace " + coNamespace) + } + + result := runImporter(t, true) + + if result.exitCode != 0 && result.exitCode != 2 { + t.Fatalf("IMP-ACC-004: expected exit code 0 or 2, got %d\nstdout: %s\nstderr: %s", + result.exitCode, result.stdout, result.stderr) + } + + if result.report == nil { + t.Fatal("IMP-ACC-004: expected report") + } + + t.Logf("Discovered: %d, Created: %d, Skipped: %d, Failed: %d", + result.report.Counts.Discovered, + result.report.Counts.Create, + result.report.Counts.Skip, + result.report.Counts.Failed, + ) + + if result.report.Counts.Discovered == 0 { + t.Error("IMP-ACC-004: expected at least 1 discovered binding") + } + + // Verify created configs exist in ACS. + for _, item := range result.report.Items { + if item.Action == "create" && item.ACSScanConfigID != "" { + t.Logf("Created: %s (id=%s)", item.Source.BindingName, item.ACSScanConfigID) + } + } + + // Cleanup: delete configs we created. + t.Cleanup(func() { + for _, item := range result.report.Items { + if item.Action == "create" && item.ACSScanConfigID != "" { + acsDeleteConfig(t, item.ACSScanConfigID) + } + } + }) +} + +// TestIMP_ACC_005_IdempotentSecondRun verifies that a second run with the same +// inputs produces only skip actions (no new creates). +func TestIMP_ACC_005_IdempotentSecondRun(t *testing.T) { + nSSBs := countSSBs(t) + if nSSBs == 0 { + t.Skip("no ScanSettingBindings") + } + + // First run: create. + r1 := runImporter(t, true) + if r1.exitCode != 0 && r1.exitCode != 2 { + t.Fatalf("first run exit code %d", r1.exitCode) + } + + var createdIDs []string + if r1.report != nil { + for _, item := range r1.report.Items { + if item.Action == "create" && item.ACSScanConfigID != "" { + createdIDs = append(createdIDs, item.ACSScanConfigID) + } + } + } + + t.Cleanup(func() { + for _, id := range createdIDs { + acsDeleteConfig(t, id) + } + }) + + // Second run: should be all skips. + r2 := runImporter(t, true) + if r2.exitCode != 0 && r2.exitCode != 2 { + t.Fatalf("IMP-ACC-005: second run exit code %d", r2.exitCode) + } + + if r2.report != nil && r2.report.Counts.Create > 0 { + t.Errorf("IMP-ACC-005: second run created %d configs (expected 0)", r2.report.Counts.Create) + } +} + +// TestIMP_ACC_007_InvalidTokenFailsFast verifies that an invalid token +// produces exit code 1 (fatal). +func TestIMP_ACC_007_InvalidTokenFailsFast(t *testing.T) { + // Override auth with a bad token. + cmd := exec.Command(importerBin, + "--endpoint", endpoint, + "--insecure-skip-verify", + "--co-namespace", coNamespace, + ) + cmd.Env = append(os.Environ(), + "ROX_API_TOKEN=definitely-not-a-valid-token", + "ROX_ADMIN_PASSWORD=", // clear password to avoid ambiguous auth + ) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + err := cmd.Run() + exitCode := 0 + if exitErr, ok := err.(*exec.ExitError); ok { + exitCode = exitErr.ExitCode() + } + + if exitCode != 1 { + t.Errorf("IMP-ACC-007: expected exit code 1 for invalid token, got %d\nstdout: %s\nstderr: %s", + exitCode, stdout.String(), stderr.String()) + } +} + +// TestIMP_ACC_012_ProblemsHaveFixHints verifies that all problems in the +// report include description and fixHint fields. +func TestIMP_ACC_012_ProblemsHaveFixHints(t *testing.T) { + result := runImporter(t, true, "--dry-run") + + if result.report == nil { + t.Skip("no report generated") + } + + for i, p := range result.report.Problems { + if p.Description == "" { + t.Errorf("IMP-ACC-012: problem[%d] has empty description", i) + } + if p.FixHint == "" { + t.Errorf("IMP-ACC-012: problem[%d] has empty fixHint (description: %s)", i, p.Description) + } + } +} + +// TestIMP_ACC_017_AutoDiscoverClusterID verifies that the importer can +// auto-discover the ACS cluster ID without --cluster. +func TestIMP_ACC_017_AutoDiscoverClusterID(t *testing.T) { + result := runImporter(t, true, "--dry-run") + + if result.exitCode == 1 { + // Check if it's an auto-discovery failure. + combined := result.stdout + result.stderr + if strings.Contains(combined, "discover cluster ID") { + t.Fatalf("IMP-ACC-017: auto-discovery failed:\n%s", combined) + } + } + + // If exit 0 or 2, auto-discovery succeeded (it's used implicitly when + // no --cluster is given). + if result.exitCode != 0 && result.exitCode != 2 { + t.Errorf("IMP-ACC-017: expected exit 0 or 2, got %d\nstdout: %s\nstderr: %s", + result.exitCode, result.stdout, result.stderr) + } +} + +// TestIMP_ACC_014_OverwriteExistingUpdates verifies that --overwrite-existing +// updates existing scan configs instead of skipping them. +func TestIMP_ACC_014_OverwriteExistingUpdates(t *testing.T) { + nSSBs := countSSBs(t) + if nSSBs == 0 { + t.Skip("no ScanSettingBindings") + } + + // First run: create. + r1 := runImporter(t, true) + if r1.report == nil || r1.report.Counts.Create == 0 { + // Nothing was created (maybe everything already exists). Create-then-overwrite + // test only makes sense when we create something. + t.Skip("no new configs created in first run") + } + + var createdIDs []string + for _, item := range r1.report.Items { + if item.Action == "create" && item.ACSScanConfigID != "" { + createdIDs = append(createdIDs, item.ACSScanConfigID) + } + } + t.Cleanup(func() { + for _, id := range createdIDs { + acsDeleteConfig(t, id) + } + }) + + // Second run with --overwrite-existing: should update, not skip. + r2 := runImporter(t, true, "--overwrite-existing") + if r2.exitCode != 0 && r2.exitCode != 2 { + t.Fatalf("overwrite run exit code %d", r2.exitCode) + } + + if r2.report == nil { + t.Fatal("IMP-ACC-014: expected report from overwrite run") + } + + if r2.report.Counts.Update == 0 && r2.report.Counts.Skip > 0 { + t.Error("IMP-ACC-014: expected updates with --overwrite-existing, got only skips") + } + t.Logf("Overwrite run: updated=%d, created=%d, skipped=%d", + r2.report.Counts.Update, r2.report.Counts.Create, r2.report.Counts.Skip) +} diff --git a/scripts/compliance-operator-importer/go.mod b/scripts/compliance-operator-importer/go.mod new file mode 100644 index 0000000000000..312ad91530093 --- /dev/null +++ b/scripts/compliance-operator-importer/go.mod @@ -0,0 +1,35 @@ +module github.com/stackrox/co-acs-importer + +go 1.25.0 + +require ( + k8s.io/apimachinery v0.35.3 + k8s.io/client-go v0.35.3 +) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/spf13/pflag v1.0.9 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.9.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 // indirect + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect +) diff --git a/scripts/compliance-operator-importer/go.sum b/scripts/compliance-operator-importer/go.sum new file mode 100644 index 0000000000000..3dc8c6782020e --- /dev/null +++ b/scripts/compliance-operator-importer/go.sum @@ -0,0 +1,94 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= +github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.35.3 h1:pA2fiBc6+N9PDf7SAiluKGEBuScsTzd2uYBkA5RzNWQ= +k8s.io/api v0.35.3/go.mod h1:9Y9tkBcFwKNq2sxwZTQh1Njh9qHl81D0As56tu42GA4= +k8s.io/apimachinery v0.35.3 h1:MeaUwQCV3tjKP4bcwWGgZ/cp/vpsRnQzqO6J6tJyoF8= +k8s.io/apimachinery v0.35.3/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/client-go v0.35.3 h1:s1lZbpN4uI6IxeTM2cpdtrwHcSOBML1ODNTCCfsP1pg= +k8s.io/client-go v0.35.3/go.mod h1:RzoXkc0mzpWIDvBrRnD+VlfXP+lRzqQjCmKtiwZ8Q9c= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/scripts/compliance-operator-importer/hack/.demo-seed-id b/scripts/compliance-operator-importer/hack/.demo-seed-id new file mode 100644 index 0000000000000..2c6374ac9b228 --- /dev/null +++ b/scripts/compliance-operator-importer/hack/.demo-seed-id @@ -0,0 +1 @@ +64005cb1 diff --git a/scripts/compliance-operator-importer/hack/check-spec-coverage.sh b/scripts/compliance-operator-importer/hack/check-spec-coverage.sh new file mode 100755 index 0000000000000..5b2aaf90ad323 --- /dev/null +++ b/scripts/compliance-operator-importer/hack/check-spec-coverage.sh @@ -0,0 +1,113 @@ +#!/usr/bin/env bash +# Checks that every IMP-* requirement ID defined in specs/ appears in at least +# one *_test.go file. Reports gaps and exits non-zero when any are found. +# +# USAGE: hack/check-spec-coverage.sh +# Run from the compliance-operator-importer directory. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +SPECS_DIR="$ROOT/specs" +SRC_DIR="$ROOT/internal" + +# Extract unique IMP-*-NNN IDs from spec files (markdown + feature). +# Handles both "IMP-FOO-001" and range notation "IMP-FOO-001..005". +extract_spec_ids() { + local ids=() + + # Direct IDs: IMP-XXX-NNN + while IFS= read -r id; do + ids+=("$id") + done < <(grep -ohrE 'IMP-[A-Z]+-[0-9]+' "$SPECS_DIR" | sort -u) + + # Range IDs: IMP-XXX-NNN..MMM → expand to individual IDs + while IFS= read -r range_match; do + local prefix num_start num_end + prefix=$(echo "$range_match" | grep -oE 'IMP-[A-Z]+-') + num_start=$(echo "$range_match" | grep -oE '[0-9]+' | head -1) + num_end=$(echo "$range_match" | grep -oE '[0-9]+' | tail -1) + # Strip leading zeros for arithmetic + local start=$((10#$num_start)) + local end=$((10#$num_end)) + local width=${#num_start} + for ((i = start; i <= end; i++)); do + ids+=("$(printf "%s%0${width}d" "$prefix" "$i")") + done + done < <(grep -ohrE 'IMP-[A-Z]+-[0-9]+\.\.[0-9]+' "$SPECS_DIR" | sort -u) + + # Deduplicate and sort + printf '%s\n' "${ids[@]}" | sort -u +} + +# Extract IDs referenced in test files. +# Matches both IMP-CLI-001 (in comments) and IMP_CLI_001 (in Go identifiers). +extract_test_ids() { + grep -ohrE 'IMP[-_][A-Z]+[-_][0-9]+' "$SRC_DIR" --include='*_test.go' \ + | sed 's/_/-/g' \ + | sort -u +} + +# IDs explicitly marked as "(removed)" in specs — no test needed. +extract_removed_ids() { + grep -E '\(removed' "$SPECS_DIR"/*.md "$SPECS_DIR"/*.feature 2>/dev/null \ + | grep -oE 'IMP-[A-Z]+-[0-9]+' \ + | sort -u +} + +spec_ids=$(extract_spec_ids) +test_ids=$(extract_test_ids) +removed_ids=$(extract_removed_ids) + +# IMP-ACC-* are acceptance test IDs (real-cluster tests, not unit tests). +# They are tracked separately and excluded from the gap report. +missing=() +covered=0 +skipped=0 +total=0 + +while IFS= read -r id; do + total=$((total + 1)) + + # Skip acceptance test IDs (IMP-ACC-*) + if [[ "$id" == IMP-ACC-* ]]; then + skipped=$((skipped + 1)) + continue + fi + + # Skip removed IDs + if echo "$removed_ids" | grep -qxF "$id"; then + skipped=$((skipped + 1)) + continue + fi + + if echo "$test_ids" | grep -qxF "$id"; then + covered=$((covered + 1)) + else + missing+=("$id") + fi +done <<< "$spec_ids" + +echo "Spec coverage report" +echo "====================" +echo "Total IDs in specs: $total" +echo "Covered by tests: $covered" +echo "Skipped (ACC/removed): $skipped" +echo "Missing test coverage: ${#missing[@]}" +echo "" + +if [[ ${#missing[@]} -gt 0 ]]; then + echo "GAPS (IDs with no *_test.go reference):" + for id in "${missing[@]}"; do + # Show which spec file defines this ID. + file=$(grep -rlE "\b${id}\b" "$SPECS_DIR" | head -1 | xargs basename 2>/dev/null || echo "?") + echo " $id ($file)" + done + echo "" + echo "FAIL: ${#missing[@]} requirement(s) lack test coverage." + exit 1 +else + echo "OK: all testable requirements are covered." + exit 0 +fi diff --git a/scripts/compliance-operator-importer/hack/demo-seed.sh b/scripts/compliance-operator-importer/hack/demo-seed.sh new file mode 100755 index 0000000000000..deb2e831850fd --- /dev/null +++ b/scripts/compliance-operator-importer/hack/demo-seed.sh @@ -0,0 +1,340 @@ +#!/usr/bin/env bash +# demo-seed.sh — Seed demo fixtures for the CO → ACS importer. +# +# Creates 2 scan configs in ACS and 3 SSBs in Kubernetes. One SSB +# intentionally shares a name with an ACS scan config to demonstrate +# conflict handling. +# +# All resources are tagged with a short unique ID (e.g. "d7f2") so +# they can be identified and cleaned up reliably. +# +# Usage: +# ./demo-seed.sh up # create fixtures +# ./demo-seed.sh down # tear down fixtures +# ./demo-seed.sh status # show what exists +# +# Prerequisites: +# ROX_ENDPOINT, ROX_ADMIN_PASSWORD (or ROX_API_TOKEN), kubectl access. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +STATE_FILE="${SCRIPT_DIR}/.demo-seed-id" +CO_NS="openshift-compliance" + +# ── ACS connection ─────────────────────────────────────────────────────────── + +ACS_ENDPOINT="${ROX_ENDPOINT:?ROX_ENDPOINT must be set}" +ACS_URL="${ACS_ENDPOINT#http://}" +ACS_URL="${ACS_URL#https://}" +ACS_URL="https://${ACS_URL}" + +if [[ -n "${ROX_ADMIN_PASSWORD:-}" ]]; then + CURL_AUTH=(-u "admin:${ROX_ADMIN_PASSWORD}") +elif [[ -n "${ROX_API_TOKEN:-}" ]]; then + CURL_AUTH=(-H "Authorization: Bearer ${ROX_API_TOKEN}") +else + echo "ERROR: set ROX_ADMIN_PASSWORD or ROX_API_TOKEN" >&2 + exit 1 +fi + +# ── Helpers ────────────────────────────────────────────────────────────────── + +BOLD='\033[1m' DIM='\033[2m' GREEN='\033[32m' RED='\033[31m' +YELLOW='\033[33m' CYAN='\033[36m' RESET='\033[0m' + +ok() { echo -e " ${GREEN}✓${RESET} $1"; } +fail() { echo -e " ${RED}✗${RESET} $1"; } +info() { echo -e " ${DIM}$1${RESET}"; } +hdr() { echo -e "\n${CYAN}${BOLD}── $1 ──${RESET}\n"; } + +acs_api() { + local method="$1" path="$2" + shift 2 + curl -sk "${CURL_AUTH[@]}" -X "$method" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + "${ACS_URL}${path}" "$@" +} + +# Get the ACS cluster ID for the current context. +get_acs_cluster_id() { + local clusters + clusters=$(acs_api GET "/v1/clusters" 2> /dev/null) + # Match by provider metadata (OpenShift cluster ID). + local ocp_id + ocp_id=$(kubectl get clusterversion version -o jsonpath='{.spec.clusterID}' 2> /dev/null || true) + if [[ -n "$ocp_id" ]]; then + local matched + matched=$(echo "$clusters" | python3 -c " +import sys, json +data = json.load(sys.stdin) +for c in data.get('clusters', []): + pid = c.get('status',{}).get('providerMetadata',{}).get('cluster',{}).get('id','') + if pid == '${ocp_id}': + print(c['id']); break +" 2> /dev/null || true) + if [[ -n "$matched" ]]; then + echo "$matched" + return + fi + fi + # Fallback: first cluster. + echo "$clusters" | python3 -c " +import sys, json +data = json.load(sys.stdin) +cs = data.get('clusters', []) +if cs: print(cs[0]['id']) +" 2> /dev/null +} + +generate_id() { + # Use od to avoid SIGPIPE from tr|head under pipefail. + od -An -tx1 -N4 /dev/urandom | tr -d ' \n' +} + +load_id() { + if [[ ! -f "$STATE_FILE" ]]; then + echo "ERROR: no active seed found (${STATE_FILE} missing). Run '$0 up' first." >&2 + exit 1 + fi + cat "$STATE_FILE" +} + +# Resource names derived from seed ID. +# ACS-only scans: demo-{id}-stig-weekly, demo-{id}-cis-audit +# K8s SSBs: demo-{id}-cis-audit (CONFLICT!), demo-{id}-moderate-daily, demo-{id}-pci-scan +# K8s ScanSetting: demo-{id}-setting +names_for() { + local id="$1" + ACS_SCAN_1="demo-${id}-stig-weekly" + ACS_SCAN_2="demo-${id}-cis-audit" + SSB_1="demo-${id}-cis-audit" # same as ACS_SCAN_2 → conflict + SSB_2="demo-${id}-moderate-daily" + SSB_3="demo-${id}-pci-scan" + SCAN_SETTING="demo-${id}-setting" +} + +# ── UP ─────────────────────────────────────────────────────────────────────── + +cmd_up() { + if [[ -f "$STATE_FILE" ]]; then + local old_id + old_id=$(cat "$STATE_FILE") + echo -e "${YELLOW}WARNING: seed '${old_id}' already exists. Run '$0 down' first or '$0 up --force'.${RESET}" + if [[ "${1:-}" != "--force" ]]; then exit 1; fi + cmd_down + fi + + local id + id=$(generate_id) + names_for "$id" + + echo -e "${BOLD}Seeding demo fixtures [id: ${CYAN}${id}${RESET}${BOLD}]${RESET}" + + # ── K8s: ScanSetting ───────────────────────────────────────────────── + hdr "Kubernetes: ScanSetting" + kubectl apply -f - << EOF +apiVersion: compliance.openshift.io/v1alpha1 +kind: ScanSetting +metadata: + name: ${SCAN_SETTING} + namespace: ${CO_NS} + labels: + demo-seed: "${id}" +schedule: "0 3 * * *" +roles: [worker, master] +rawResultStorage: + rotation: 3 + size: 1Gi +EOF + ok "${SCAN_SETTING} (daily 03:00)" + + # ── K8s: SSBs ──────────────────────────────────────────────────────── + hdr "Kubernetes: ScanSettingBindings" + for pair in \ + "${SSB_1}:ocp4-cis" \ + "${SSB_2}:ocp4-moderate" \ + "${SSB_3}:ocp4-pci-dss"; do + local name="${pair%%:*}" profile="${pair#*:}" + kubectl apply -f - << EOF +apiVersion: compliance.openshift.io/v1alpha1 +kind: ScanSettingBinding +metadata: + name: ${name} + namespace: ${CO_NS} + labels: + demo-seed: "${id}" +profiles: + - name: ${profile} + kind: Profile + apiGroup: compliance.openshift.io/v1alpha1 +settingsRef: + name: ${SCAN_SETTING} + kind: ScanSetting + apiGroup: compliance.openshift.io/v1alpha1 +EOF + local note="" + [[ "$name" == "$SSB_1" ]] && note=" ← will conflict with ACS scan" + ok "${name} (${profile})${note}" + done + + # ── ACS: scan configs ──────────────────────────────────────────────── + hdr "ACS: Scan Configurations" + local cluster_id + cluster_id=$(get_acs_cluster_id) + if [[ -z "$cluster_id" ]]; then + fail "Could not determine ACS cluster ID" + exit 1 + fi + info "Using ACS cluster ID: ${cluster_id}" + + # Scan 1: STIG weekly (no conflict with any SSB). + acs_api POST "/v2/compliance/scan/configurations" -d "{ + \"scanName\": \"${ACS_SCAN_1}\", + \"scanConfig\": { + \"oneTimeScan\": false, + \"profiles\": [\"ocp4-stig\"], + \"scanSchedule\": { + \"intervalType\": \"WEEKLY\", + \"hour\": 4, \"minute\": 0, + \"daysOfWeek\": { \"days\": [1] } + }, + \"description\": \"Demo seed ${id}: STIG weekly scan (no conflict)\" + }, + \"clusters\": [\"${cluster_id}\"] + }" > /dev/null 2>&1 + ok "${ACS_SCAN_1} (ocp4-stig, weekly Mon 04:00)" + + # Scan 2: CIS audit — same name as SSB_1 → deliberate conflict. + acs_api POST "/v2/compliance/scan/configurations" -d "{ + \"scanName\": \"${ACS_SCAN_2}\", + \"scanConfig\": { + \"oneTimeScan\": false, + \"profiles\": [\"ocp4-cis\"], + \"scanSchedule\": { + \"intervalType\": \"WEEKLY\", + \"hour\": 6, \"minute\": 30, + \"daysOfWeek\": { \"days\": [5] } + }, + \"description\": \"Demo seed ${id}: CIS audit — pre-existing, will conflict with SSB\" + }, + \"clusters\": [\"${cluster_id}\"] + }" > /dev/null 2>&1 + ok "${ACS_SCAN_2} (ocp4-cis, weekly Fri 06:30) ← conflicts with SSB" + + # ── Save state ─────────────────────────────────────────────────────── + echo "$id" > "$STATE_FILE" + + hdr "Summary" + echo -e " ${BOLD}Seed ID:${RESET} ${CYAN}${id}${RESET}" + echo -e " ${BOLD}K8s SSBs:${RESET} ${SSB_1}, ${SSB_2}, ${SSB_3}" + echo -e " ${BOLD}ACS scans:${RESET} ${ACS_SCAN_1}, ${ACS_SCAN_2}" + echo -e " ${BOLD}Conflict:${RESET} ${RED}${SSB_1}${RESET} (SSB) vs ${RED}${ACS_SCAN_2}${RESET} (ACS)" + echo "" + echo -e " ${DIM}Run the importer to see conflict handling:${RESET}" + echo -e " ${DIM} ./compliance-operator-importer --endpoint \$ROX_ENDPOINT --insecure-skip-verify${RESET}" + echo -e " ${DIM} ./compliance-operator-importer --endpoint \$ROX_ENDPOINT --insecure-skip-verify --overwrite-existing${RESET}" + echo "" + echo -e " ${DIM}Tear down: $0 down${RESET}" + echo "" +} + +# ── DOWN ───────────────────────────────────────────────────────────────────── + +cmd_down() { + local id + id=$(load_id) + names_for "$id" + + echo -e "${BOLD}Removing demo fixtures [id: ${CYAN}${id}${RESET}${BOLD}]${RESET}" + + # ── K8s ────────────────────────────────────────────────────────────── + hdr "Kubernetes" + for name in "$SSB_1" "$SSB_2" "$SSB_3"; do + if kubectl delete scansettingbinding "$name" -n "$CO_NS" --ignore-not-found 2> /dev/null; then + ok "Deleted SSB ${name}" + fi + done + if kubectl delete scansetting "$SCAN_SETTING" -n "$CO_NS" --ignore-not-found 2> /dev/null; then + ok "Deleted ScanSetting ${SCAN_SETTING}" + fi + + # ── ACS ────────────────────────────────────────────────────────────── + hdr "ACS" + local configs + configs=$(acs_api GET "/v2/compliance/scan/configurations?pagination.limit=1000" 2> /dev/null) + + # Delete any scan config whose name starts with "demo-{id}-". + echo "$configs" | python3 -c " +import sys, json +data = json.load(sys.stdin) +prefix = 'demo-${id}-' +for c in data.get('configurations', []): + if c['scanName'].startswith(prefix): + print(c['id'] + ' ' + c['scanName']) +" 2> /dev/null | while read -r cfg_id cfg_name; do + acs_api DELETE "/v2/compliance/scan/configurations/${cfg_id}" > /dev/null 2>&1 + ok "Deleted ACS scan config ${cfg_name} (${cfg_id})" + done + + rm -f "$STATE_FILE" + echo "" + ok "All demo-${id} fixtures removed." + echo "" +} + +# ── STATUS ─────────────────────────────────────────────────────────────────── + +cmd_status() { + local id + id=$(load_id) + names_for "$id" + + echo -e "${BOLD}Demo fixtures status [id: ${CYAN}${id}${RESET}${BOLD}]${RESET}" + + hdr "Kubernetes (namespace: ${CO_NS})" + kubectl get scansettingbindings.compliance.openshift.io,scansettings.compliance.openshift.io \ + -n "$CO_NS" -l "demo-seed=${id}" \ + -o custom-columns='KIND:.kind,NAME:.metadata.name' --no-headers 2> /dev/null \ + | while read -r kind name; do + info "${kind}: ${name}" + done + + hdr "ACS" + local configs + configs=$(acs_api GET "/v2/compliance/scan/configurations?pagination.limit=1000" 2> /dev/null) + echo "$configs" | python3 -c " +import sys, json +data = json.load(sys.stdin) +prefix = 'demo-${id}-' +for c in data.get('configurations', []): + if c['scanName'].startswith(prefix): + sched = c.get('scanConfig', {}).get('scanSchedule', {}) + profiles = c.get('scanConfig', {}).get('profiles', []) + interval = sched.get('intervalType', '?') + hour = sched.get('hour', '?') + minute = sched.get('minute', 0) + print(f\" {c['scanName']} ({', '.join(profiles)}, {interval} {hour}:{minute:02d}) id={c['id']}\") +" 2> /dev/null + echo "" +} + +# ── Main ───────────────────────────────────────────────────────────────────── +function help { + echo "Usage: $0 {up|down|status}" + echo "" + echo " up Create 2 ACS scan configs + 3 K8s SSBs (1 conflicting)" + echo " down Remove all fixtures created by 'up'" + echo " status Show current fixture state" +} + +case "${1:-}" in + up) cmd_up "${2:-}" ;; + down) cmd_down ;; + status) cmd_status ;; + help) help ;; + -h) help ;; + --help) help ;; + *) cmd_up "${2:-}" ;; +esac diff --git a/scripts/compliance-operator-importer/hack/demo.sh b/scripts/compliance-operator-importer/hack/demo.sh new file mode 100755 index 0000000000000..b4f5d59584629 --- /dev/null +++ b/scripts/compliance-operator-importer/hack/demo.sh @@ -0,0 +1,489 @@ +#!/usr/bin/env bash +# demo.sh — Interactive demo of the CO → ACS scheduled scan importer. +# +# Prerequisites: +# - kubectl configured with at least one context pointing to an OCP cluster +# with the Compliance Operator installed +# - ACS Central reachable from this machine +# - ROX_ADMIN_PASSWORD or ROX_API_TOKEN set +# - ROX_ENDPOINT set (or passed via --endpoint) +# - The importer binary built: +# cd scripts/compliance-operator-importer && go build -o compliance-operator-importer ./cmd/importer +# +# Usage: +# ROX_ADMIN_PASSWORD=admin ROX_ENDPOINT=central.example.com ./demo.sh +# +# Non-interactive mode (for CI/testing): +# DEMO_AUTO=1 ROX_ADMIN_PASSWORD=admin ROX_ENDPOINT=central.example.com ./demo.sh +# DEMO_AUTO=1 DEMO_PAUSE=0 ... # no pauses at all + +set -euo pipefail + +# ───────────────────────────────────────────────────────────────────────────── +# Configuration +# ───────────────────────────────────────────────────────────────────────────── + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +IMPORTER="${SCRIPT_DIR}/../compliance-operator-importer" +CO_NS="openshift-compliance" + +# Resolve ACS endpoint — strip scheme, the importer adds it back. +ACS_ENDPOINT="${ROX_ENDPOINT:?ROX_ENDPOINT must be set}" +ACS_URL="https://${ACS_ENDPOINT#https://}" +ACS_URL="${ACS_URL#http://}" +ACS_URL="https://${ACS_URL#https://}" + +# Auth for curl calls (basic auth only for this demo). +CURL_AUTH=(-u "admin:${ROX_ADMIN_PASSWORD:?ROX_ADMIN_PASSWORD must be set}") + +# Importer flags. +IMPORTER_FLAGS=(--endpoint "$ACS_ENDPOINT" --insecure-skip-verify) + +# Demo SSB names — prefixed to avoid collisions with real workloads. +DEMO_PREFIX="demo-import" +SSB_CIS="${DEMO_PREFIX}-cis-scan" +SSB_MODERATE="${DEMO_PREFIX}-moderate-scan" +SSB_PCI="${DEMO_PREFIX}-pci-dss-scan" + +# ───────────────────────────────────────────────────────────────────────────── +# Helpers +# ───────────────────────────────────────────────────────────────────────────── + +# Terminal colours. +BOLD='\033[1m' +DIM='\033[2m' +CYAN='\033[36m' +GREEN='\033[32m' +YELLOW='\033[33m' +RED='\033[31m' +MAGENTA='\033[35m' +RESET='\033[0m' + +banner() { + local width=72 + echo "" + echo -e "${CYAN}${BOLD}$(printf '═%.0s' $(seq 1 $width))${RESET}" + echo "$1" + echo -e "${CYAN}${BOLD}$(printf '═%.0s' $(seq 1 $width))${RESET}" + echo "" +} + +section() { + echo "" + echo -e "${MAGENTA}${BOLD}── $1 ──${RESET}" + echo "" +} + +info() { + echo -e "${DIM}$1${RESET}" +} + +narrate() { + echo -e "${YELLOW}$1${RESET}" +} + +success() { + echo -e "${GREEN} ✓ $1${RESET}" +} + +fail_msg() { + echo -e "${RED} ✗ $1${RESET}" +} + +pause() { + echo "" + if [[ "${DEMO_AUTO:-}" == "1" ]]; then + sleep "${DEMO_PAUSE:-2}" + else + echo -ne "${DIM}Press ENTER to continue...${RESET}" + read -r + fi + echo "" +} + +run_cmd() { + echo -e "${BOLD}\$ $*${RESET}" + "$@" 2>&1 || true + echo "" +} + +acs_api() { + local method="$1" path="$2" + shift 2 + curl -sk "${CURL_AUTH[@]}" -X "$method" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json" \ + "${ACS_URL}${path}" "$@" +} + +# ───────────────────────────────────────────────────────────────────────────── +# Cleanup helper — removes all demo resources +# ───────────────────────────────────────────────────────────────────────────── + +cleanup_demo_resources() { + local quiet="${1:-}" + + [[ -z "$quiet" ]] && info "Cleaning up demo resources..." + + # Delete demo SSBs from the cluster. + for ssb in "$SSB_CIS" "$SSB_MODERATE" "$SSB_PCI"; do + kubectl delete scansettingbinding "$ssb" -n "$CO_NS" --ignore-not-found 2> /dev/null || true + done + + # Delete demo ScanSettings (original + ACS-created ones named after SSBs). + kubectl delete scansetting "${DEMO_PREFIX}-setting" -n "$CO_NS" --ignore-not-found 2> /dev/null || true + for ssb in "$SSB_CIS" "$SSB_MODERATE" "$SSB_PCI"; do + kubectl delete scansetting "$ssb" -n "$CO_NS" --ignore-not-found 2> /dev/null || true + done + + # Delete demo scan configs from ACS. + local configs + configs=$(acs_api GET "/v2/compliance/scan/configurations?pagination.limit=1000" 2> /dev/null) + for ssb in "$SSB_CIS" "$SSB_MODERATE" "$SSB_PCI"; do + local config_id + config_id=$(echo "$configs" | python3 -c " +import sys, json +data = json.load(sys.stdin) +for c in data.get('configurations', []): + if c['scanName'] == '$ssb': + print(c['id']) + break +" 2> /dev/null || true) + if [[ -n "$config_id" ]]; then + acs_api DELETE "/v2/compliance/scan/configurations/$config_id" > /dev/null 2>&1 || true + fi + done + + [[ -z "$quiet" ]] && success "Done" + return 0 +} + +# ───────────────────────────────────────────────────────────────────────────── +# Trap — clean up on exit or interrupt +# ───────────────────────────────────────────────────────────────────────────── + +trap 'echo ""; cleanup_demo_resources' EXIT + +# ═════════════════════════════════════════════════════════════════════════════ +# DEMO START +# ═════════════════════════════════════════════════════════════════════════════ + +clear +banner "CO → ACS Scheduled Scan Importer — Interactive Demo" + +narrate "This demo walks through the importer tool that reads Compliance Operator" +narrate "ScanSettingBinding resources from Kubernetes and creates equivalent scan" +narrate "configurations in Red Hat Advanced Cluster Security (ACS)." +echo "" +narrate "We will:" +narrate " 1. Create demo ScanSettingBindings on the cluster" +narrate " 2. Run the importer in dry-run mode" +narrate " 3. Run the importer for real (happy path)" +narrate " 4. Run again to see skip behaviour (idempotency)" +narrate " 5. Simulate schedule drift on the Kubernetes side" +narrate " 6. Run without --overwrite-existing (drift preserved)" +narrate " 7. Run with --overwrite-existing (drift resolved)" +echo "" +info "Cluster: $(kubectl config current-context)" +info "ACS: $ACS_URL" +info "CO NS: $CO_NS" + +pause + +# Pre-clean: silently remove leftovers from a previous run. +cleanup_demo_resources quiet + +# ───────────────────────────────────────────────────────────────────────────── +# STEP 1: Create demo ScanSetting and ScanSettingBindings +# ───────────────────────────────────────────────────────────────────────────── + +banner "Step 1: Create Demo Resources" + +narrate "First, we create a ScanSetting with a daily schedule (02:00 UTC)," +narrate "then three ScanSettingBindings that reference it — each binding" +narrate "targets a different compliance profile." + +pause + +section "Creating ScanSetting: ${DEMO_PREFIX}-setting" +info "Schedule: 0 2 * * * (daily at 02:00)" + +run_cmd kubectl apply -f - << EOF +apiVersion: compliance.openshift.io/v1alpha1 +kind: ScanSetting +metadata: + name: ${DEMO_PREFIX}-setting + namespace: ${CO_NS} +schedule: "0 2 * * *" +roles: + - worker + - master +rawResultStorage: + rotation: 3 + size: 1Gi +EOF + +section "Creating ScanSettingBinding: ${SSB_CIS}" +info "Profile: ocp4-cis" + +run_cmd kubectl apply -f - << EOF +apiVersion: compliance.openshift.io/v1alpha1 +kind: ScanSettingBinding +metadata: + name: ${SSB_CIS} + namespace: ${CO_NS} +profiles: + - name: ocp4-cis + kind: Profile + apiGroup: compliance.openshift.io/v1alpha1 +settingsRef: + name: ${DEMO_PREFIX}-setting + kind: ScanSetting + apiGroup: compliance.openshift.io/v1alpha1 +EOF + +section "Creating ScanSettingBinding: ${SSB_MODERATE}" +info "Profile: ocp4-moderate" + +run_cmd kubectl apply -f - << EOF +apiVersion: compliance.openshift.io/v1alpha1 +kind: ScanSettingBinding +metadata: + name: ${SSB_MODERATE} + namespace: ${CO_NS} +profiles: + - name: ocp4-moderate + kind: Profile + apiGroup: compliance.openshift.io/v1alpha1 +settingsRef: + name: ${DEMO_PREFIX}-setting + kind: ScanSetting + apiGroup: compliance.openshift.io/v1alpha1 +EOF + +section "Creating ScanSettingBinding: ${SSB_PCI}" +info "Profile: ocp4-pci-dss" + +run_cmd kubectl apply -f - << EOF +apiVersion: compliance.openshift.io/v1alpha1 +kind: ScanSettingBinding +metadata: + name: ${SSB_PCI} + namespace: ${CO_NS} +profiles: + - name: ocp4-pci-dss + kind: Profile + apiGroup: compliance.openshift.io/v1alpha1 +settingsRef: + name: ${DEMO_PREFIX}-setting + kind: ScanSetting + apiGroup: compliance.openshift.io/v1alpha1 +EOF + +section "Verify: resources on the cluster" +run_cmd kubectl get scansettingbindings.compliance.openshift.io -n "$CO_NS" \ + -l '!app.kubernetes.io/managed-by' \ + -o custom-columns='NAME:.metadata.name,SETTING:.settingsRef.name,PROFILES:.profiles[*].name' + +narrate "Three ScanSettingBindings created, each referencing the demo ScanSetting." +narrate "The importer will read these and create matching ACS scan configurations." + +pause + +# ───────────────────────────────────────────────────────────────────────────── +# STEP 2: Dry run +# ───────────────────────────────────────────────────────────────────────────── + +banner "Step 2: Dry Run" + +narrate "Before making any changes, let's preview what the importer would do." +narrate "The --dry-run flag shows planned actions without touching ACS." + +pause + +run_cmd "$IMPORTER" "${IMPORTER_FLAGS[@]}" --dry-run + +narrate "The importer discovered our 3 demo SSBs, mapped them to ACS scan" +narrate "configurations, and reported that it would create all three." +narrate "No changes were made to ACS." + +pause + +# ───────────────────────────────────────────────────────────────────────────── +# STEP 3: Happy path — real import +# ───────────────────────────────────────────────────────────────────────────── + +banner "Step 3: Import (Happy Path)" + +narrate "Now let's run the importer for real. It will create three scan" +narrate "configurations in ACS, one for each ScanSettingBinding." + +pause + +run_cmd "$IMPORTER" "${IMPORTER_FLAGS[@]}" + +section "Verify: scan configurations in ACS" +info "Querying ACS API for our demo scan configs..." +echo "" + +for ssb in "$SSB_CIS" "$SSB_MODERATE" "$SSB_PCI"; do + local_configs=$(acs_api GET "/v2/compliance/scan/configurations?pagination.limit=1000" 2> /dev/null) + found=$(echo "$local_configs" | python3 -c " +import sys, json +data = json.load(sys.stdin) +for c in data.get('configurations', []): + if c['scanName'] == '$ssb': + sched = c.get('scanConfig', {}).get('scanSchedule', {}) + profiles = c.get('scanConfig', {}).get('profiles', []) + print(f\" Name: {c['scanName']}\") + print(f\" ID: {c['id']}\") + print(f\" Schedule: {sched.get('intervalType','?')} at {sched.get('hour','?')}:{sched.get('minute','?'):02d}\") + print(f\" Profiles: {', '.join(profiles)}\") + break +" 2> /dev/null || true) + if [[ -n "$found" ]]; then + success "Found in ACS:" + echo "$found" + echo "" + fi +done + +narrate "All three scan configurations were created successfully in ACS." + +pause + +# ───────────────────────────────────────────────────────────────────────────── +# STEP 4: Idempotency — run again, expect skips +# ───────────────────────────────────────────────────────────────────────────── + +banner "Step 4: Idempotency" + +narrate "What happens if we run the importer again? Since the scan configurations" +narrate "already exist in ACS, the importer should skip them gracefully." + +pause + +run_cmd "$IMPORTER" "${IMPORTER_FLAGS[@]}" + +narrate "All three were skipped — the importer is idempotent by default." +narrate "It detects existing scan configs by name and does not create duplicates." + +pause + +# ───────────────────────────────────────────────────────────────────────────── +# STEP 5: Simulate schedule drift on the Kubernetes side +# ───────────────────────────────────────────────────────────────────────────── + +banner "Step 5: Simulate Schedule Drift" + +narrate "After the initial import, each SSB was adopted — its settingsRef now" +narrate "points to an ACS-managed ScanSetting (same name as the scan config)." +narrate "" +narrate "Let's simulate a real-world scenario: someone edits the ACS-managed" +narrate "ScanSetting directly on the cluster (e.g. via kubectl). ACS does NOT" +narrate "detect this change — the UI still shows the original schedule, but" +narrate "scans actually run on the new schedule. A silent drift." + +pause + +section "Editing ACS-managed ScanSetting directly on the cluster" +info "ScanSetting '${SSB_CIS}' was created by ACS with schedule 0 2 * * *" +info "Patching it to 0 5 * * * (daily at 05:00)" + +run_cmd kubectl patch scansetting "${SSB_CIS}" -n "$CO_NS" \ + --type merge -p '{"schedule": "0 5 * * *"}' + +section "Verify: cluster vs ACS" +echo "" +echo -e "${BOLD}On the cluster (actual behaviour):${RESET}" +kubectl get scansetting "${SSB_CIS}" -n "$CO_NS" \ + -o custom-columns='SCANSETTING:.metadata.name,SCHEDULE:.schedule' --no-headers +echo "" +echo -e "${BOLD}In ACS (what the UI shows):${RESET}" +acs_api GET "/v2/compliance/scan/configurations?pagination.limit=1000" 2> /dev/null | python3 -c " +import sys, json +data = json.load(sys.stdin) +for c in data.get('configurations', []): + if c['scanName'] == '${SSB_CIS}': + sched = c.get('scanConfig', {}).get('scanSchedule', {}) + print(f\" {c['scanName']}: {sched.get('intervalType','?')} at {sched.get('hour','?')}:{sched.get('minute','?'):02d}\") + break +" 2> /dev/null +echo "" + +narrate "The cluster now scans at 05:00, but ACS still thinks it's 02:00." +narrate "This silent drift is exactly what the importer can detect and fix." + +pause + +# ───────────────────────────────────────────────────────────────────────────── +# STEP 6: Run without --overwrite-existing (skip conflict) +# ───────────────────────────────────────────────────────────────────────────── + +banner "Step 6: Default Behaviour (Skip Conflicts)" + +narrate "Running the importer without --overwrite-existing. The scan config" +narrate "already exists in ACS, so the importer will skip it — even though" +narrate "the schedule has drifted on the cluster." + +pause + +run_cmd "$IMPORTER" "${IMPORTER_FLAGS[@]}" + +narrate "All three were skipped — the importer found existing configs by name" +narrate "and left them untouched. The drifted CIS config was NOT updated." +narrate "This is the safe default: no surprises, no overwrites." + +pause + +# ───────────────────────────────────────────────────────────────────────────── +# STEP 7: Run with --overwrite-existing (resolve drift) +# ───────────────────────────────────────────────────────────────────────────── + +banner "Step 7: Overwrite Mode (Resolve Drift)" + +narrate "Now let's run with --overwrite-existing. This tells the importer to" +narrate "update existing ACS scan configs to match what's on the cluster." +narrate "The CIS config in ACS will be updated from 02:00 → 05:00." + +pause + +run_cmd "$IMPORTER" "${IMPORTER_FLAGS[@]}" --overwrite-existing + +section "Verify: ACS now matches the cluster" +acs_api GET "/v2/compliance/scan/configurations?pagination.limit=1000" 2> /dev/null | python3 -c " +import sys, json +data = json.load(sys.stdin) +for c in data.get('configurations', []): + if c['scanName'] == '${SSB_CIS}': + sched = c.get('scanConfig', {}).get('scanSchedule', {}) + print(f\" Name: {c['scanName']}\") + print(f\" Schedule: {sched.get('intervalType','?')} at {sched.get('hour','?')}:{sched.get('minute','?'):02d}\") + break +" 2> /dev/null +echo "" + +narrate "The CIS scan config has been updated to DAILY 05:00 — matching the" +narrate "cluster's ScanSetting. The --overwrite-existing flag ensures ACS" +narrate "stays in sync with the Compliance Operator source of truth." + +pause + +# ───────────────────────────────────────────────────────────────────────────── +# Done — EXIT trap handles cleanup automatically +# ───────────────────────────────────────────────────────────────────────────── + +banner "Demo Complete" + +narrate "Summary of what we demonstrated:" +echo "" +echo -e " ${GREEN}1.${RESET} Created CO resources (ScanSetting + 3 ScanSettingBindings)" +echo -e " ${GREEN}2.${RESET} Dry-run mode: preview without side effects" +echo -e " ${GREEN}3.${RESET} Happy path: imported all SSBs into ACS scan configs + adoption" +echo -e " ${GREEN}4.${RESET} Idempotency: re-run skips existing configs safely" +echo -e " ${GREEN}5.${RESET} Schedule drift: changed ScanSetting schedule on the cluster" +echo -e " ${GREEN}6.${RESET} Default skip: drift preserved without --overwrite-existing" +echo -e " ${GREEN}7.${RESET} Overwrite mode: drift resolved, ACS re-synced to cluster" +echo "" + +# The EXIT trap handles cleanup automatically. diff --git a/scripts/compliance-operator-importer/hack/run-e2e.sh b/scripts/compliance-operator-importer/hack/run-e2e.sh new file mode 100755 index 0000000000000..3b84a596e4b40 --- /dev/null +++ b/scripts/compliance-operator-importer/hack/run-e2e.sh @@ -0,0 +1,51 @@ +#!/usr/bin/env bash +# Run e2e acceptance tests against a real ACS + Compliance Operator cluster. +# +# USAGE: +# hack/run-e2e.sh # run all e2e tests +# hack/run-e2e.sh -run TestIMP_ACC_003 # run a specific test +# +# Required environment: +# ROX_ENDPOINT ACS Central URL (bare hostname OK) +# ROX_ADMIN_PASSWORD Basic auth password (or ROX_API_TOKEN for token auth) +# +# Optional: +# CO_NAMESPACE CO namespace (default: openshift-compliance) +# E2E_KEEP_CONFIGS Set to "1" to skip cleanup of created scan configs +# +# The script builds the importer, then runs `go test -tags e2e` against e2e/. + +set -euo pipefail + +SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)" +ROOT="$(cd "$SCRIPT_DIR/.." && pwd)" +cd "$ROOT" + +# Validate prerequisites. +if [[ -z "${ROX_ENDPOINT:-}" ]]; then + echo "ERROR: ROX_ENDPOINT not set" >&2 + echo " export ROX_ENDPOINT=central-stackrox.apps.mycluster.example.com" >&2 + exit 1 +fi + +if [[ -z "${ROX_API_TOKEN:-}" ]] && [[ -z "${ROX_ADMIN_PASSWORD:-}" ]]; then + echo "ERROR: neither ROX_API_TOKEN nor ROX_ADMIN_PASSWORD is set" >&2 + echo " export ROX_API_TOKEN= # for token auth" >&2 + echo " export ROX_ADMIN_PASSWORD= # for basic auth" >&2 + exit 1 +fi + +command -v kubectl >/dev/null 2>&1 || { + echo "ERROR: kubectl not found in PATH" >&2 + exit 1 +} + +echo "=== E2E Test Configuration ===" +echo " ROX_ENDPOINT: ${ROX_ENDPOINT}" +echo " Auth mode: $(if [[ -n "${ROX_API_TOKEN:-}" ]]; then echo "token"; else echo "basic"; fi)" +echo " CO_NAMESPACE: ${CO_NAMESPACE:-openshift-compliance}" +echo " E2E_KEEP_CONFIGS: ${E2E_KEEP_CONFIGS:-0}" +echo "" + +# Run tests. Pass through any extra args (e.g. -run, -v). +exec go test -tags e2e -v -count=1 -timeout 5m ./e2e/ "$@" diff --git a/scripts/compliance-operator-importer/internal/acs/client.go b/scripts/compliance-operator-importer/internal/acs/client.go new file mode 100644 index 0000000000000..3d9e40049d717 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/acs/client.go @@ -0,0 +1,343 @@ +// Package acs provides an HTTP client for the ACS compliance scan configuration API. +package acs + +import ( + "bytes" + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "time" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// client is the concrete implementation of models.ACSClient. +type client struct { + httpClient *http.Client + baseURL string + cfg *models.Config +} + +// NewClient creates a models.ACSClient from cfg. +// +// TLS is configured from cfg.CACertFile and cfg.InsecureSkipVerify. +// Timeout is set from cfg.RequestTimeout. +// Authentication: +// - token mode: "Authorization: Bearer " (token resolved from cfg.TokenEnv) +// - basic mode: HTTP Basic auth (cfg.Username + password from cfg.PasswordEnv) +func NewClient(cfg *models.Config) (models.ACSClient, error) { + tlsCfg, err := buildTLSConfig(cfg) + if err != nil { + return nil, fmt.Errorf("acs: building TLS config: %w", err) + } + + transport := &http.Transport{ + TLSClientConfig: tlsCfg, + } + + timeout := cfg.RequestTimeout + if timeout == 0 { + timeout = 30 * time.Second + } + + return &client{ + httpClient: &http.Client{ + Transport: transport, + Timeout: timeout, + }, + baseURL: cfg.ACSEndpoint, + cfg: cfg, + }, nil +} + +// buildTLSConfig constructs a tls.Config from the importer config. +func buildTLSConfig(cfg *models.Config) (*tls.Config, error) { + tlsCfg := &tls.Config{ + MinVersion: tls.VersionTLS12, + InsecureSkipVerify: cfg.InsecureSkipVerify, //nolint:gosec // controlled by explicit CLI flag + } + + if cfg.CACertFile != "" { + pemData, err := os.ReadFile(cfg.CACertFile) + if err != nil { + return nil, fmt.Errorf("reading CA cert file %q: %w", cfg.CACertFile, err) + } + pool := x509.NewCertPool() + if !pool.AppendCertsFromPEM(pemData) { + return nil, fmt.Errorf("no valid PEM certificates found in %q", cfg.CACertFile) + } + tlsCfg.RootCAs = pool + } + + return tlsCfg, nil +} + +// addAuth adds the correct Authorization header to req based on the configured auth mode. +func (c *client) addAuth(req *http.Request) error { + switch c.cfg.AuthMode { + case models.AuthModeBasic: + password := os.Getenv("ROX_ADMIN_PASSWORD") + req.SetBasicAuth(c.cfg.Username, password) + default: // token mode + token := os.Getenv("ROX_API_TOKEN") + if token == "" { + return errors.New("acs: ROX_API_TOKEN is empty") + } + req.Header.Set("Authorization", "Bearer "+token) + } + return nil +} + +// Preflight checks ACS connectivity and auth by calling: +// +// GET /v2/compliance/scan/configurations?pagination.limit=1 +// +// Only HTTP 200 is treated as success; any other status returns an error. +// +// Implements IMP-CLI-015, IMP-CLI-016. +func (c *client) Preflight(ctx context.Context) error { + url := c.baseURL + "/v2/compliance/scan/configurations?pagination.limit=1" + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return fmt.Errorf("acs: preflight request: %w", err) + } + req.Header.Set("Accept", "application/json") + if err := c.addAuth(req); err != nil { + return err + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("acs: preflight failed: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + switch resp.StatusCode { + case http.StatusUnauthorized: + return errors.New("acs: preflight: HTTP 401 Unauthorized - check token or credentials") + case http.StatusForbidden: + return errors.New("acs: preflight: HTTP 403 Forbidden - token lacks required permissions") + default: + return fmt.Errorf("acs: preflight: unexpected HTTP %d", resp.StatusCode) + } + } + return nil +} + +// ListScanConfigurations returns all existing scan configuration summaries by calling: +// +// GET /v2/compliance/scan/configurations?pagination.limit=1000 +// +// Implements IMP-IDEM-001 (used to build the existing-name set). +func (c *client) ListScanConfigurations(ctx context.Context) ([]models.ACSConfigSummary, error) { + url := c.baseURL + "/v2/compliance/scan/configurations?pagination.limit=1000" + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("acs: list request: %w", err) + } + req.Header.Set("Accept", "application/json") + if err := c.addAuth(req); err != nil { + return nil, err + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("acs: list scan configurations: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("acs: list scan configurations: HTTP %d", resp.StatusCode) + } + + var listResp models.ACSListResponse + if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil { + return nil, fmt.Errorf("acs: decoding list response: %w", err) + } + return listResp.Configurations, nil +} + +// complianceScanConfigurationResponse is used to parse the id from the POST response. +type complianceScanConfigurationResponse struct { + ID string `json:"id"` +} + +// CreateScanConfiguration sends POST /v2/compliance/scan/configurations and returns +// the ID of the newly created configuration. +// +// Implements IMP-IDEM-001. +func (c *client) CreateScanConfiguration(ctx context.Context, payload models.ACSCreatePayload) (string, error) { + body, err := json.Marshal(payload) + if err != nil { + return "", fmt.Errorf("acs: marshalling create payload: %w", err) + } + + url := c.baseURL + "/v2/compliance/scan/configurations" + req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body)) + if err != nil { + return "", fmt.Errorf("acs: create request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + if err := c.addAuth(req); err != nil { + return "", err + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return "", fmt.Errorf("acs: create scan configuration: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated { + return "", &HTTPError{Code: resp.StatusCode, Message: fmt.Sprintf("POST /v2/compliance/scan/configurations returned HTTP %d: %s", resp.StatusCode, readBodySnippet(resp))} + } + + var created complianceScanConfigurationResponse + if err := json.NewDecoder(resp.Body).Decode(&created); err != nil { + return "", fmt.Errorf("acs: decoding create response: %w", err) + } + if created.ID == "" { + return "", errors.New("acs: create response contained empty id") + } + return created.ID, nil +} + +// UpdateScanConfiguration sends PUT /v2/compliance/scan/configurations/{id} to update +// an existing scan configuration. +// +// Implements IMP-IDEM-008. +func (c *client) UpdateScanConfiguration(ctx context.Context, id string, payload models.ACSCreatePayload) error { + body, err := json.Marshal(payload) + if err != nil { + return fmt.Errorf("acs: marshalling update payload: %w", err) + } + + url := c.baseURL + "/v2/compliance/scan/configurations/" + id + req, err := http.NewRequestWithContext(ctx, http.MethodPut, url, bytes.NewReader(body)) + if err != nil { + return fmt.Errorf("acs: update request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Accept", "application/json") + if err := c.addAuth(req); err != nil { + return err + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return fmt.Errorf("acs: update scan configuration: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { + return &HTTPError{Code: resp.StatusCode, Message: fmt.Sprintf("PUT /v2/compliance/scan/configurations/%s returned HTTP %d: %s", id, resp.StatusCode, readBodySnippet(resp))} + } + + return nil +} + +// clusterStatus is used to parse the status field from a cluster response. +type clusterStatus struct { + ProviderMetadata struct { + Cluster struct { + ID string `json:"id"` // OpenShift cluster ID or other provider cluster ID + } `json:"cluster"` + } `json:"providerMetadata"` +} + +// clusterResponse represents a single cluster in the ACS API response. +type clusterResponse struct { + ID string `json:"id"` + Name string `json:"name"` + Status clusterStatus `json:"status"` +} + +// clustersListResponse matches GET /v1/clusters. +type clustersListResponse struct { + Clusters []clusterResponse `json:"clusters"` +} + +// ListClusters returns all clusters managed by ACS by calling: +// +// GET /v1/clusters +// +// Used for cluster ID discovery (IMP-MAP-017, IMP-MAP-018, IMP-MAP-007). +func (c *client) ListClusters(ctx context.Context) ([]models.ACSClusterInfo, error) { + url := c.baseURL + "/v1/clusters" + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return nil, fmt.Errorf("acs: list clusters request: %w", err) + } + req.Header.Set("Accept", "application/json") + if err := c.addAuth(req); err != nil { + return nil, err + } + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fmt.Errorf("acs: list clusters: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("acs: list clusters: HTTP %d", resp.StatusCode) + } + + var listResp clustersListResponse + if err := json.NewDecoder(resp.Body).Decode(&listResp); err != nil { + return nil, fmt.Errorf("acs: decoding clusters response: %w", err) + } + + result := make([]models.ACSClusterInfo, 0, len(listResp.Clusters)) + for _, c := range listResp.Clusters { + result = append(result, models.ACSClusterInfo{ + ID: c.ID, + Name: c.Name, + ProviderClusterID: c.Status.ProviderMetadata.Cluster.ID, + }) + } + return result, nil +} + +// readBodySnippet reads up to 512 bytes from the response body for error reporting. +func readBodySnippet(resp *http.Response) string { + const maxBytes = 512 + body, err := io.ReadAll(io.LimitReader(resp.Body, maxBytes)) + if err != nil || len(body) == 0 { + return "(no response body)" + } + snippet := string(body) + // Try to extract a cleaner message from JSON error responses. + var parsed struct { + Message string `json:"message"` + Error string `json:"error"` + } + if json.Unmarshal(body, &parsed) == nil { + if parsed.Message != "" { + return parsed.Message + } + if parsed.Error != "" { + return parsed.Error + } + } + return snippet +} + +// HTTPError is returned by CreateScanConfiguration and UpdateScanConfiguration when the server responds with +// a non-success HTTP status. The reconciler uses StatusCode() to decide whether +// to retry (transient: 429,502,503,504) or abort (non-transient: 400,401,403,404). +type HTTPError struct { + Code int + Message string +} + +func (e *HTTPError) Error() string { return e.Message } +func (e *HTTPError) StatusCode() int { return e.Code } diff --git a/scripts/compliance-operator-importer/internal/acs/client_test.go b/scripts/compliance-operator-importer/internal/acs/client_test.go new file mode 100644 index 0000000000000..99d8258cd862d --- /dev/null +++ b/scripts/compliance-operator-importer/internal/acs/client_test.go @@ -0,0 +1,256 @@ +package acs_test + +import ( + "context" + "crypto/tls" + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stackrox/co-acs-importer/internal/acs" + "github.com/stackrox/co-acs-importer/internal/models" +) + +// newTestConfig returns a Config wired to the given TLS test server URL. +// InsecureSkipVerify is always true so the self-signed httptest cert is accepted. +func newTestConfig(serverURL string) *models.Config { + return &models.Config{ + ACSEndpoint: serverURL, + AuthMode: models.AuthModeToken, + RequestTimeout: 5 * time.Second, + MaxRetries: 3, + InsecureSkipVerify: true, + } +} + +// startTLSServer starts an httptest TLS server with the provided handler and +// returns the server plus an http.Client pre-configured with the server's TLS cert. +func startTLSServer(handler http.Handler) (*httptest.Server, *http.Client) { + srv := httptest.NewTLSServer(handler) + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec // test only + }, + Timeout: 5 * time.Second, + } + return srv, client +} + +// IMP-CLI-015: Preflight 200 => nil error +func TestPreflight_200_ReturnsNil(t *testing.T) { + srv, _ := startTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/v2/compliance/scan/configurations" { + http.NotFound(w, r) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(models.ACSListResponse{}) + })) + defer srv.Close() + + t.Setenv("ROX_API_TOKEN", "test-token") + cfg := newTestConfig(srv.URL) + client, err := acs.NewClient(cfg) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + + if err := client.Preflight(context.Background()); err != nil { + t.Errorf("IMP-CLI-015: Preflight with HTTP 200 should return nil, got: %v", err) + } +} + +// IMP-CLI-016: Preflight 401 => error +func TestPreflight_401_ReturnsError(t *testing.T) { + srv, _ := startTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Unauthorized", http.StatusUnauthorized) + })) + defer srv.Close() + + t.Setenv("ROX_API_TOKEN", "bad-token") + cfg := newTestConfig(srv.URL) + client, err := acs.NewClient(cfg) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + + if err := client.Preflight(context.Background()); err == nil { + t.Error("IMP-CLI-016: Preflight with HTTP 401 should return error, got nil") + } +} + +// IMP-CLI-016: Preflight 403 => error +func TestPreflight_403_ReturnsError(t *testing.T) { + srv, _ := startTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.Error(w, "Forbidden", http.StatusForbidden) + })) + defer srv.Close() + + t.Setenv("ROX_API_TOKEN", "bad-token") + cfg := newTestConfig(srv.URL) + client, err := acs.NewClient(cfg) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + + if err := client.Preflight(context.Background()); err == nil { + t.Error("IMP-CLI-016: Preflight with HTTP 403 should return error, got nil") + } +} + +// IMP-IDEM-001: ListScanConfigurations returns parsed list +func TestListScanConfigurations_ReturnsParsedList(t *testing.T) { + want := []models.ACSConfigSummary{ + {ID: "id-1", ScanName: "cis-weekly"}, + {ID: "id-2", ScanName: "pci-daily"}, + } + srv, _ := startTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/v2/compliance/scan/configurations" { + http.NotFound(w, r) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(models.ACSListResponse{ + Configurations: want, + TotalCount: int32(len(want)), + }) + })) + defer srv.Close() + + t.Setenv("ROX_API_TOKEN", "test-token") + cfg := newTestConfig(srv.URL) + client, err := acs.NewClient(cfg) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + + got, err := client.ListScanConfigurations(context.Background()) + if err != nil { + t.Fatalf("IMP-IDEM-001: ListScanConfigurations: %v", err) + } + if len(got) != len(want) { + t.Fatalf("IMP-IDEM-001: expected %d configs, got %d", len(want), len(got)) + } + for i, g := range got { + if g.ID != want[i].ID || g.ScanName != want[i].ScanName { + t.Errorf("IMP-IDEM-001: item[%d]: got {%s %s}, want {%s %s}", i, g.ID, g.ScanName, want[i].ID, want[i].ScanName) + } + } +} + +// IMP-IDEM-003: CreateScanConfiguration uses POST method (never PUT) +// IMP-IDEM-001: CreateScanConfiguration returns new config ID +func TestCreateScanConfiguration_UsesPOSTAndReturnsID(t *testing.T) { + const wantID = "new-config-id-123" + var gotMethod string + + srv, _ := startTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path != "/v2/compliance/scan/configurations" { + http.NotFound(w, r) + return + } + gotMethod = r.Method + if r.Method != http.MethodPost { + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _ = json.NewEncoder(w).Encode(map[string]string{"id": wantID}) + })) + defer srv.Close() + + t.Setenv("ROX_API_TOKEN", "test-token") + cfg := newTestConfig(srv.URL) + client, err := acs.NewClient(cfg) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + + payload := models.ACSCreatePayload{ + ScanName: "cis-weekly", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"ocp4-cis"}, + Description: "test", + }, + Clusters: []string{"cluster-a"}, + } + + gotID, err := client.CreateScanConfiguration(context.Background(), payload) + if err != nil { + t.Fatalf("IMP-IDEM-001: CreateScanConfiguration: %v", err) + } + + // IMP-IDEM-003: must use POST, never PUT + if gotMethod != http.MethodPost { + t.Errorf("IMP-IDEM-003: expected method POST, got %s", gotMethod) + } + if gotMethod == http.MethodPut { + t.Errorf("IMP-IDEM-003: VIOLATION - PUT was called, which is forbidden in Phase 1") + } + + // IMP-IDEM-001: must return the ID from the response + if gotID != wantID { + t.Errorf("IMP-IDEM-001: expected ID %q, got %q", wantID, gotID) + } +} + +// IMP-IDEM-003: Compile-time guard - verify the ACSClient interface has no Put method. +func TestNoPUTMethodOnInterface(t *testing.T) { + // IMP-IDEM-003: This test documents the invariant. + t.Log("IMP-IDEM-003: ACSClient interface has no PUT method - enforced by interface definition") +} + +// TestCreateScanConfiguration_400_IncludesResponseBody verifies that HTTP 400 +// errors include the server's response body in the error message. +func TestCreateScanConfiguration_400_IncludesResponseBody(t *testing.T) { + const apiError = "Unable to find all profiles for scan configuration named \"cis-weekly\"." + srv, _ := startTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusBadRequest) + _ = json.NewEncoder(w).Encode(map[string]string{"message": apiError}) + })) + defer srv.Close() + + t.Setenv("ROX_API_TOKEN", "test-token") + cfg := newTestConfig(srv.URL) + client, err := acs.NewClient(cfg) + if err != nil { + t.Fatalf("NewClient: %v", err) + } + + payload := models.ACSCreatePayload{ + ScanName: "cis-weekly", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"ocp4-cis"}, + Description: "test", + }, + Clusters: []string{"cluster-a"}, + } + + _, err = client.CreateScanConfiguration(context.Background(), payload) + if err == nil { + t.Fatal("expected error for HTTP 400, got nil") + } + + // Verify the error message contains the API error text. + errMsg := err.Error() + if !strings.Contains(errMsg, apiError) { + t.Errorf("error message should contain API error %q, got: %s", apiError, errMsg) + } + + // Verify it's an HTTPError with correct status code. + type statusCoder interface{ StatusCode() int } + sc, ok := err.(statusCoder) + if !ok { + t.Fatal("expected error to satisfy StatusCode() interface") + } + if sc.StatusCode() != 400 { + t.Errorf("expected status code 400, got %d", sc.StatusCode()) + } +} diff --git a/scripts/compliance-operator-importer/internal/adopt/adopt.go b/scripts/compliance-operator-importer/internal/adopt/adopt.go new file mode 100644 index 0000000000000..85794511a4e74 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/adopt/adopt.go @@ -0,0 +1,154 @@ +// Package adopt patches ScanSettingBinding resources on each cluster to +// reference the ScanSetting that ACS creates when a scan configuration is +// pushed to Sensor. This completes the "handover" so the SSB is fully +// managed by ACS going forward. +package adopt + +import ( + "context" + "fmt" + "time" + + "github.com/stackrox/co-acs-importer/internal/cofetch" +) + +// DefaultPollInterval is how often to check for the ScanSetting. +const DefaultPollInterval = 3 * time.Second + +// DefaultPollTimeout is the maximum time to wait for the ScanSetting to appear. +const DefaultPollTimeout = 60 * time.Second + +// Request describes one SSB that should be adopted after an ACS scan config +// was successfully created. +type Request struct { + SSBName string // ScanSettingBinding name (= ACS scan config name) + SSBNamespace string + OldSettingRef string // current settingsRef.name on the SSB + ClusterLabel string // kubeconfig context name, for logging + COClient cofetch.COClient // k8s client scoped to this cluster + + // PreExistingScanSettings is the set of ScanSetting names that existed + // on this cluster before reconciliation. If the target name is in this + // set, adoption is skipped to avoid patching the SSB onto a resource + // that ACS doesn't control. + PreExistingScanSettings map[string]bool +} + +// Result records the outcome for one adoption request. +type Result struct { + SSBName string + ClusterLabel string + Adopted bool // true if the SSB was patched + Skipped bool // true if settingsRef already correct + TimedOut bool // true if the ScanSetting didn't appear in time + Err error // non-nil on unexpected failure + Message string // human-readable description of what happened +} + +// Adopter runs the adoption step for a batch of requests. +type Adopter struct { + PollInterval time.Duration + PollTimeout time.Duration +} + +// New creates an Adopter with default poll settings. +func New() *Adopter { + return &Adopter{ + PollInterval: DefaultPollInterval, + PollTimeout: DefaultPollTimeout, + } +} + +// Adopt processes a list of adoption requests. Each request is handled +// independently — a failure or timeout on one cluster does not block others. +func (a *Adopter) Adopt(ctx context.Context, requests []Request) []Result { + results := make([]Result, 0, len(requests)) + for _, req := range requests { + results = append(results, a.adoptOne(ctx, req)) + } + return results +} + +func (a *Adopter) adoptOne(ctx context.Context, req Request) Result { + newSettingName := req.SSBName // ACS creates a ScanSetting with the same name as the scan config + + // IMP-ADOPT-003: skip if already pointing to the right ScanSetting. + if req.OldSettingRef == newSettingName { + return Result{ + SSBName: req.SSBName, + ClusterLabel: req.ClusterLabel, + Skipped: true, + Message: fmt.Sprintf("SSB %s/%s already references ScanSetting %q, no patch needed", req.SSBNamespace, req.SSBName, newSettingName), + } + } + + // IMP-ADOPT-007: if a ScanSetting with the target name already existed + // on the cluster before reconciliation, it's a pre-existing resource + // that would conflict with the ACS-managed one. Skip adoption to + // avoid patching the SSB onto a ScanSetting that ACS doesn't control. + if req.PreExistingScanSettings[newSettingName] { + return Result{ + SSBName: req.SSBName, + ClusterLabel: req.ClusterLabel, + Skipped: true, + Message: fmt.Sprintf("ScanSetting %q already exists on cluster %s but SSB %s/%s references %q; skipping adoption to avoid conflict with pre-existing resource", + newSettingName, req.ClusterLabel, req.SSBNamespace, req.SSBName, req.OldSettingRef), + } + } + + // Poll for the ACS-created ScanSetting to appear on the cluster. + if err := a.waitForScanSetting(ctx, req.COClient, req.SSBNamespace, newSettingName); err != nil { + // IMP-ADOPT-004, IMP-ADOPT-005, IMP-ADOPT-006: timeout is a warning, not an error. + return Result{ + SSBName: req.SSBName, + ClusterLabel: req.ClusterLabel, + TimedOut: true, + Message: fmt.Sprintf("timed out waiting for ScanSetting %q to appear on cluster %s; SSB %s/%s was NOT patched (settingsRef still %q)", + newSettingName, req.ClusterLabel, req.SSBNamespace, req.SSBName, req.OldSettingRef), + } + } + + // IMP-ADOPT-001: patch the SSB's settingsRef to point to the new ScanSetting. + if err := req.COClient.PatchSSBSettingsRef(ctx, req.SSBNamespace, req.SSBName, newSettingName); err != nil { + return Result{ + SSBName: req.SSBName, + ClusterLabel: req.ClusterLabel, + Err: err, + Message: fmt.Sprintf("failed to patch SSB %s/%s settingsRef on cluster %s: %v", + req.SSBNamespace, req.SSBName, req.ClusterLabel, err), + } + } + + return Result{ + SSBName: req.SSBName, + ClusterLabel: req.ClusterLabel, + Adopted: true, + Message: fmt.Sprintf("adopted SSB %s/%s on cluster %s: settingsRef changed from %q to %q", + req.SSBNamespace, req.SSBName, req.ClusterLabel, req.OldSettingRef, newSettingName), + } +} + +// waitForScanSetting polls until the named ScanSetting exists or the timeout expires. +func (a *Adopter) waitForScanSetting(ctx context.Context, client cofetch.COClient, namespace, name string) error { + deadline := time.After(a.PollTimeout) + ticker := time.NewTicker(a.PollInterval) + defer ticker.Stop() + + // Check immediately before first tick. + if _, err := client.GetScanSetting(ctx, namespace, name); err == nil { + return nil + } + + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-deadline: + return fmt.Errorf("ScanSetting %q not found after %s", name, a.PollTimeout) + case <-ticker.C: + if _, err := client.GetScanSetting(ctx, namespace, name); err == nil { + return nil + } + } + } +} diff --git a/scripts/compliance-operator-importer/internal/adopt/adopt_test.go b/scripts/compliance-operator-importer/internal/adopt/adopt_test.go new file mode 100644 index 0000000000000..be086c66bccd5 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/adopt/adopt_test.go @@ -0,0 +1,375 @@ +package adopt + +import ( + "context" + "errors" + "fmt" + "sync" + "testing" + "time" + + "github.com/stackrox/co-acs-importer/internal/cofetch" +) + +// mockCOClient is a test double for cofetch.COClient that supports: +// - controllable ScanSetting existence (via scanSettings map) +// - tracking PatchSSBSettingsRef calls +// - simulating errors +type mockCOClient struct { + mu sync.Mutex + scanSettings map[string]*cofetch.ScanSetting // key: "namespace/name" + patches []patchCall // recorded PatchSSBSettingsRef calls + patchErr error // if non-nil, PatchSSBSettingsRef returns this +} + +type patchCall struct { + Namespace string + SSBName string + NewSettingsRefName string +} + +func newMockCOClient() *mockCOClient { + return &mockCOClient{ + scanSettings: make(map[string]*cofetch.ScanSetting), + } +} + +func (m *mockCOClient) addScanSetting(namespace, name string) { + m.mu.Lock() + defer m.mu.Unlock() + m.scanSettings[namespace+"/"+name] = &cofetch.ScanSetting{ + Namespace: namespace, + Name: name, + } +} + +func (m *mockCOClient) ListScanSettingBindings(_ context.Context) ([]cofetch.ScanSettingBinding, error) { + return nil, nil +} + +func (m *mockCOClient) GetScanSetting(_ context.Context, namespace, name string) (*cofetch.ScanSetting, error) { + m.mu.Lock() + defer m.mu.Unlock() + ss, ok := m.scanSettings[namespace+"/"+name] + if !ok { + return nil, fmt.Errorf("ScanSetting %q not found in namespace %q", name, namespace) + } + return ss, nil +} + +func (m *mockCOClient) PatchSSBSettingsRef(_ context.Context, namespace, ssbName, newSettingsRefName string) error { + m.mu.Lock() + defer m.mu.Unlock() + if m.patchErr != nil { + return m.patchErr + } + m.patches = append(m.patches, patchCall{ + Namespace: namespace, + SSBName: ssbName, + NewSettingsRefName: newSettingsRefName, + }) + return nil +} + +// Compile-time check. +var _ cofetch.COClient = (*mockCOClient)(nil) + +// TestIMP_ADOPT_001_PatchSettingsRef verifies that the SSB's settingsRef is +// patched to the scan config name after ACS creates the ScanSetting. +func TestIMP_ADOPT_001_PatchSettingsRef(t *testing.T) { + client := newMockCOClient() + client.addScanSetting("openshift-compliance", "cis-weekly") + + adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 1 * time.Second} + results := adopter.Adopt(context.Background(), []Request{{ + SSBName: "cis-weekly", + SSBNamespace: "openshift-compliance", + OldSettingRef: "my-old-setting", + ClusterLabel: "ctx-a", + COClient: client, + }}) + + if len(results) != 1 { + t.Fatalf("expected 1 result, got %d", len(results)) + } + r := results[0] + if !r.Adopted { + t.Errorf("expected Adopted=true, got false; message: %s", r.Message) + } + if len(client.patches) != 1 { + t.Fatalf("expected 1 patch call, got %d", len(client.patches)) + } + p := client.patches[0] + if p.NewSettingsRefName != "cis-weekly" { + t.Errorf("patch newSettingsRefName: want %q, got %q", "cis-weekly", p.NewSettingsRefName) + } + if p.SSBName != "cis-weekly" { + t.Errorf("patch SSBName: want %q, got %q", "cis-weekly", p.SSBName) + } +} + +// TestIMP_ADOPT_002_LogMessage verifies the result message mentions the adoption. +func TestIMP_ADOPT_002_LogMessage(t *testing.T) { + client := newMockCOClient() + client.addScanSetting("openshift-compliance", "cis-weekly") + + adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 1 * time.Second} + results := adopter.Adopt(context.Background(), []Request{{ + SSBName: "cis-weekly", + SSBNamespace: "openshift-compliance", + OldSettingRef: "my-old-setting", + ClusterLabel: "ctx-a", + COClient: client, + }}) + + r := results[0] + if r.Message == "" { + t.Error("expected non-empty message for adopted SSB") + } + // Message should mention old and new setting names. + for _, want := range []string{"my-old-setting", "cis-weekly", "adopted"} { + if !containsStr(r.Message, want) { + t.Errorf("message should contain %q, got %q", want, r.Message) + } + } +} + +// TestIMP_ADOPT_003_SkipAlreadyAdopted verifies that no patch is issued when +// the SSB already references the correct ScanSetting. +func TestIMP_ADOPT_003_SkipAlreadyAdopted(t *testing.T) { + client := newMockCOClient() + // ScanSetting doesn't even need to exist — we skip before polling. + + adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 1 * time.Second} + results := adopter.Adopt(context.Background(), []Request{{ + SSBName: "cis-weekly", + SSBNamespace: "openshift-compliance", + OldSettingRef: "cis-weekly", // already correct! + ClusterLabel: "ctx-a", + COClient: client, + }}) + + r := results[0] + if !r.Skipped { + t.Error("expected Skipped=true when settingsRef already matches") + } + if r.Adopted { + t.Error("expected Adopted=false when skipped") + } + if len(client.patches) != 0 { + t.Errorf("expected 0 patch calls, got %d", len(client.patches)) + } +} + +// TestIMP_ADOPT_004_005_006_Timeout verifies that a timeout waiting for the +// ScanSetting results in a warning (not an error), and no patch. +func TestIMP_ADOPT_004_005_006_Timeout(t *testing.T) { + client := newMockCOClient() + // Don't add the ScanSetting — it never appears. + + adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 50 * time.Millisecond} + results := adopter.Adopt(context.Background(), []Request{{ + SSBName: "cis-weekly", + SSBNamespace: "openshift-compliance", + OldSettingRef: "my-old-setting", + ClusterLabel: "ctx-a", + COClient: client, + }}) + + r := results[0] + if !r.TimedOut { + t.Error("expected TimedOut=true") + } + if r.Adopted { + t.Error("expected Adopted=false on timeout") + } + if r.Err != nil { + t.Errorf("expected Err=nil on timeout (warning, not error), got %v", r.Err) + } + if len(client.patches) != 0 { + t.Errorf("expected 0 patch calls on timeout, got %d", len(client.patches)) + } +} + +// TestIMP_ADOPT_007_MultiClusterIndependent verifies that adoption patches +// SSBs on each cluster independently. +func TestIMP_ADOPT_007_MultiClusterIndependent(t *testing.T) { + clientA := newMockCOClient() + clientA.addScanSetting("openshift-compliance", "cis-weekly") + + clientB := newMockCOClient() + clientB.addScanSetting("openshift-compliance", "cis-weekly") + + adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 1 * time.Second} + results := adopter.Adopt(context.Background(), []Request{ + { + SSBName: "cis-weekly", + SSBNamespace: "openshift-compliance", + OldSettingRef: "setting-a", + ClusterLabel: "ctx-a", + COClient: clientA, + }, + { + SSBName: "cis-weekly", + SSBNamespace: "openshift-compliance", + OldSettingRef: "setting-b", + ClusterLabel: "ctx-b", + COClient: clientB, + }, + }) + + if len(results) != 2 { + t.Fatalf("expected 2 results, got %d", len(results)) + } + for i, r := range results { + if !r.Adopted { + t.Errorf("results[%d]: expected Adopted=true, got false; message: %s", i, r.Message) + } + } + if len(clientA.patches) != 1 { + t.Errorf("clientA: expected 1 patch, got %d", len(clientA.patches)) + } + if len(clientB.patches) != 1 { + t.Errorf("clientB: expected 1 patch, got %d", len(clientB.patches)) + } +} + +// TestIMP_ADOPT_008_PartialSuccess verifies that a timeout on one cluster +// does not block adoption on another. +func TestIMP_ADOPT_008_PartialSuccess(t *testing.T) { + clientA := newMockCOClient() + clientA.addScanSetting("openshift-compliance", "cis-weekly") + + clientB := newMockCOClient() + // Don't add ScanSetting on B — it times out. + + adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 50 * time.Millisecond} + results := adopter.Adopt(context.Background(), []Request{ + { + SSBName: "cis-weekly", + SSBNamespace: "openshift-compliance", + OldSettingRef: "setting-a", + ClusterLabel: "ctx-a", + COClient: clientA, + }, + { + SSBName: "cis-weekly", + SSBNamespace: "openshift-compliance", + OldSettingRef: "setting-b", + ClusterLabel: "ctx-b", + COClient: clientB, + }, + }) + + if len(results) != 2 { + t.Fatalf("expected 2 results, got %d", len(results)) + } + + // ctx-a should succeed. + if !results[0].Adopted { + t.Errorf("ctx-a: expected Adopted=true; message: %s", results[0].Message) + } + // ctx-b should time out without error. + if !results[1].TimedOut { + t.Errorf("ctx-b: expected TimedOut=true; message: %s", results[1].Message) + } + if results[1].Err != nil { + t.Errorf("ctx-b: expected Err=nil on timeout, got %v", results[1].Err) + } +} + +// TestIMP_ADOPT_PatchError verifies that a patch failure is recorded as an error. +func TestIMP_ADOPT_PatchError(t *testing.T) { + client := newMockCOClient() + client.addScanSetting("openshift-compliance", "cis-weekly") + client.patchErr = errors.New("permission denied") + + adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 1 * time.Second} + results := adopter.Adopt(context.Background(), []Request{{ + SSBName: "cis-weekly", + SSBNamespace: "openshift-compliance", + OldSettingRef: "my-old-setting", + ClusterLabel: "ctx-a", + COClient: client, + }}) + + r := results[0] + if r.Adopted { + t.Error("expected Adopted=false on patch error") + } + if r.Err == nil { + t.Error("expected non-nil Err on patch failure") + } +} + +// TestIMP_ADOPT_DelayedScanSetting verifies that the adopter polls and +// succeeds when the ScanSetting appears after a delay. +func TestIMP_ADOPT_DelayedScanSetting(t *testing.T) { + client := newMockCOClient() + + // Add the ScanSetting after a short delay. + go func() { + time.Sleep(30 * time.Millisecond) + client.addScanSetting("openshift-compliance", "cis-weekly") + }() + + adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 1 * time.Second} + results := adopter.Adopt(context.Background(), []Request{{ + SSBName: "cis-weekly", + SSBNamespace: "openshift-compliance", + OldSettingRef: "my-old-setting", + ClusterLabel: "ctx-a", + COClient: client, + }}) + + r := results[0] + if !r.Adopted { + t.Errorf("expected Adopted=true after delayed ScanSetting; message: %s", r.Message) + } +} + +// TestIMP_ADOPT_PreExistingScanSettingSkip verifies that adoption is skipped +// when a ScanSetting with the target name already existed before reconciliation. +func TestIMP_ADOPT_PreExistingScanSettingSkip(t *testing.T) { + client := newMockCOClient() + // The ScanSetting exists on the cluster (pre-existing). + client.addScanSetting("openshift-compliance", "cis-weekly") + + adopter := &Adopter{PollInterval: 10 * time.Millisecond, PollTimeout: 1 * time.Second} + results := adopter.Adopt(context.Background(), []Request{{ + SSBName: "cis-weekly", + SSBNamespace: "openshift-compliance", + OldSettingRef: "my-old-setting", + ClusterLabel: "ctx-a", + COClient: client, + // Mark the ScanSetting as pre-existing. + PreExistingScanSettings: map[string]bool{"cis-weekly": true}, + }}) + + r := results[0] + if !r.Skipped { + t.Errorf("expected Skipped=true for pre-existing ScanSetting; message: %s", r.Message) + } + if r.Adopted { + t.Error("expected Adopted=false when pre-existing ScanSetting conflicts") + } + if len(client.patches) != 0 { + t.Errorf("expected 0 patch calls, got %d", len(client.patches)) + } + if !containsStr(r.Message, "pre-existing") { + t.Errorf("message should mention pre-existing, got %q", r.Message) + } +} + +func containsStr(s, substr string) bool { + return len(s) >= len(substr) && searchStr(s, substr) +} + +func searchStr(s, substr string) bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + return false +} diff --git a/scripts/compliance-operator-importer/internal/cofetch/client.go b/scripts/compliance-operator-importer/internal/cofetch/client.go new file mode 100644 index 0000000000000..6347179bf9f3e --- /dev/null +++ b/scripts/compliance-operator-importer/internal/cofetch/client.go @@ -0,0 +1,232 @@ +package cofetch + +import ( + "context" + "encoding/json" + "errors" + "fmt" + + "github.com/stackrox/co-acs-importer/internal/models" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// GVRs for Compliance Operator resources. +var ( + scanSettingBindingGVR = schema.GroupVersionResource{ + Group: "compliance.openshift.io", + Version: "v1alpha1", + Resource: "scansettingbindings", + } + scanSettingGVR = schema.GroupVersionResource{ + Group: "compliance.openshift.io", + Version: "v1alpha1", + Resource: "scansettings", + } +) + +// k8sClient is the production implementation of COClient backed by a dynamic k8s client. +type k8sClient struct { + dynamic dynamic.Interface + namespace string // empty string means all namespaces +} + +// NewClient creates a COClient using the current kubeconfig context. +// If cfg.COAllNamespaces is true, resources are listed across all namespaces. +func NewClient(cfg *models.Config) (COClient, error) { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + overrides := &clientcmd.ConfigOverrides{} + + kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) + restConfig, err := kubeConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("build kubeconfig: %w", err) + } + + dynClient, err := dynamic.NewForConfig(restConfig) + if err != nil { + return nil, fmt.Errorf("create dynamic client: %w", err) + } + + ns := cfg.CONamespace + if cfg.COAllNamespaces { + ns = "" + } + + return &k8sClient{ + dynamic: dynClient, + namespace: ns, + }, nil +} + +// NewClientForContext creates a COClient for a specific context in the active kubeconfig. +func NewClientForContext(contextName string, namespace string, allNamespaces bool) (COClient, error) { + loadingRules := clientcmd.NewDefaultClientConfigLoadingRules() + overrides := &clientcmd.ConfigOverrides{CurrentContext: contextName} + kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) + + restConfig, err := kubeConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("build kubeconfig for context %q: %w", contextName, err) + } + + return NewClientFromRestConfig(restConfig, namespace, allNamespaces) +} + +// NewClientFromRestConfig creates a COClient from an existing rest.Config. +// This avoids kubeconfig merging, preventing credential collisions when +// multiple kubeconfig files define the same user name. +func NewClientFromRestConfig(restConfig *rest.Config, namespace string, allNamespaces bool) (COClient, error) { + dynClient, err := dynamic.NewForConfig(restConfig) + if err != nil { + return nil, fmt.Errorf("create dynamic client: %w", err) + } + + ns := namespace + if allNamespaces { + ns = "" + } + + return &k8sClient{ + dynamic: dynClient, + namespace: ns, + }, nil +} + +// ListScanSettingBindings returns all ScanSettingBindings from the configured namespace(s). +func (c *k8sClient) ListScanSettingBindings(ctx context.Context) ([]ScanSettingBinding, error) { + list, err := c.dynamic.Resource(scanSettingBindingGVR).Namespace(c.namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("list ScanSettingBindings in namespace %q: %w", c.namespace, err) + } + + result := make([]ScanSettingBinding, 0, len(list.Items)) + for _, item := range list.Items { + ssb, parseErr := parseScanSettingBinding(item.Object) + if parseErr != nil { + // Skip malformed resources rather than aborting the whole list. + continue + } + result = append(result, ssb) + } + return result, nil +} + +// GetScanSetting fetches a named ScanSetting from the given namespace. +func (c *k8sClient) GetScanSetting(ctx context.Context, namespace, name string) (*ScanSetting, error) { + obj, err := c.dynamic.Resource(scanSettingGVR).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("get ScanSetting %q in namespace %q: %w", name, namespace, err) + } + + ss, err := parseScanSetting(obj.Object) + if err != nil { + return nil, fmt.Errorf("parse ScanSetting %q: %w", name, err) + } + return ss, nil +} + +// parseScanSettingBinding converts an unstructured map into a ScanSettingBinding. +func parseScanSettingBinding(obj map[string]interface{}) (ScanSettingBinding, error) { + meta, _ := obj["metadata"].(map[string]interface{}) + name, _ := meta["name"].(string) + namespace, _ := meta["namespace"].(string) + + // profiles and settingsRef are top-level fields in the ScanSettingBinding + // resource (not nested under spec). spec is always empty in practice. + + // Parse profiles list into []NamedObjectReference. + var profiles []NamedObjectReference + if rawProfiles, ok := obj["profiles"].([]interface{}); ok { + for _, rp := range rawProfiles { + pm, ok := rp.(map[string]interface{}) + if !ok { + continue + } + profiles = append(profiles, NamedObjectReference{ + Name: stringField(pm, "name"), + Kind: stringField(pm, "kind"), + APIGroup: stringField(pm, "apiGroup"), + }) + } + } + + // Parse settingsRef as a NamedObjectReference. + var settingsRef *NamedObjectReference + if sr, ok := obj["settingsRef"].(map[string]interface{}); ok { + settingsRef = &NamedObjectReference{ + Name: stringField(sr, "name"), + Kind: stringField(sr, "kind"), + APIGroup: stringField(sr, "apiGroup"), + } + } + + if name == "" { + return ScanSettingBinding{}, errors.New("ScanSettingBinding has no name") + } + + // Populate ScanSettingName from settingsRef.Name for backward compatibility + // with callers that read the flat field (e.g. mapping package). + scanSettingName := "" + if settingsRef != nil { + scanSettingName = settingsRef.Name + } + + return ScanSettingBinding{ + Namespace: namespace, + Name: name, + ScanSettingName: scanSettingName, + SettingsRef: settingsRef, + Profiles: profiles, + }, nil +} + +// parseScanSetting converts an unstructured map into a ScanSetting. +func parseScanSetting(obj map[string]interface{}) (*ScanSetting, error) { + meta, _ := obj["metadata"].(map[string]interface{}) + name, _ := meta["name"].(string) + namespace, _ := meta["namespace"].(string) + + // schedule is a top-level field in the ScanSetting resource. + schedule, _ := obj["schedule"].(string) + + if name == "" { + return nil, errors.New("ScanSetting has no name") + } + + return &ScanSetting{ + Namespace: namespace, + Name: name, + Schedule: schedule, + }, nil +} + +// PatchSSBSettingsRef patches the settingsRef.name of a ScanSettingBinding. +func (c *k8sClient) PatchSSBSettingsRef(ctx context.Context, namespace, ssbName, newSettingsRefName string) error { + patch := map[string]interface{}{ + "settingsRef": map[string]interface{}{ + "name": newSettingsRefName, + }, + } + patchData, err := json.Marshal(patch) + if err != nil { + return fmt.Errorf("marshal patch: %w", err) + } + _, err = c.dynamic.Resource(scanSettingBindingGVR).Namespace(namespace).Patch( + ctx, ssbName, types.MergePatchType, patchData, metav1.PatchOptions{}, + ) + if err != nil { + return fmt.Errorf("patch SSB %q settingsRef in namespace %q: %w", ssbName, namespace, err) + } + return nil +} + +// stringField safely extracts a string value from an unstructured map. +func stringField(m map[string]interface{}, key string) string { + v, _ := m[key].(string) + return v +} diff --git a/scripts/compliance-operator-importer/internal/cofetch/types.go b/scripts/compliance-operator-importer/internal/cofetch/types.go new file mode 100644 index 0000000000000..0c7430f8fbd0b --- /dev/null +++ b/scripts/compliance-operator-importer/internal/cofetch/types.go @@ -0,0 +1,55 @@ +// Package cofetch defines types for Compliance Operator resource discovery. +package cofetch + +import "context" + +// NamedObjectReference is a lightweight reference to a named Kubernetes object. +// It mirrors the CO NamedObjectReference type without importing the CO library. +type NamedObjectReference struct { + Name string + Kind string // "Profile" or "TailoredProfile"; empty defaults to "Profile" (IMP-MAP-002) + APIGroup string +} + +// ResolvedKind returns the kind, defaulting to "Profile" when empty (IMP-MAP-002). +func (r NamedObjectReference) ResolvedKind() string { + if r.Kind == "" { + return "Profile" + } + return r.Kind +} + +// ProfileRef is an alias for NamedObjectReference used in profile reference lists. +// It is a type alias (not a new type) so []ProfileRef and []NamedObjectReference are +// interchangeable, allowing both client.go and mapping_test.go to construct profiles. +type ProfileRef = NamedObjectReference + +// ScanSettingBinding is a simplified representation of the Compliance Operator +// ScanSettingBinding resource (compliance.openshift.io/v1alpha1). +// Fields are extracted from unstructured Kubernetes API responses. +type ScanSettingBinding struct { + Namespace string + Name string + ScanSettingName string // name of the referenced ScanSetting (flattened from SettingsRef.Name) + SettingsRef *NamedObjectReference // full structured settings reference + Profiles []NamedObjectReference +} + +// ScanSetting is a simplified representation of the Compliance Operator ScanSetting +// resource (compliance.openshift.io/v1alpha1). +type ScanSetting struct { + Namespace string + Name string + // Schedule is the cron expression from complianceSuiteSettings.schedule. + Schedule string +} + +// COClient abstracts Compliance Operator resource discovery. +type COClient interface { + // ListScanSettingBindings returns all ScanSettingBindings in the configured namespace(s). + ListScanSettingBindings(ctx context.Context) ([]ScanSettingBinding, error) + // GetScanSetting fetches a named ScanSetting from the given namespace. + GetScanSetting(ctx context.Context, namespace, name string) (*ScanSetting, error) + // PatchSSBSettingsRef patches the settingsRef.name of a ScanSettingBinding. + PatchSSBSettingsRef(ctx context.Context, namespace, ssbName, newSettingsRefName string) error +} diff --git a/scripts/compliance-operator-importer/internal/config/config.go b/scripts/compliance-operator-importer/internal/config/config.go new file mode 100644 index 0000000000000..f37a265c24e75 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/config/config.go @@ -0,0 +1,295 @@ +// Package config parses and validates all CLI flags and environment variables +// for the CO -> ACS importer tool. +package config + +import ( + "errors" + "flag" + "fmt" + "os" + "strings" + "time" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// ErrHelpRequested is returned by ParseAndValidate when --help is passed. +// Callers should treat this as a successful exit (code 0). +var ErrHelpRequested = errors.New("help requested") + +const ( + defaultTimeout = 30 * time.Second + defaultMaxRetries = 5 + defaultCONamespace = "openshift-compliance" + defaultUsername = "admin" +) + +// repeatableStringFlag is a custom flag type for collecting multiple values. +type repeatableStringFlag struct { + values *[]string +} + +func (f *repeatableStringFlag) String() string { + if f.values == nil { + return "" + } + return strings.Join(*f.values, ",") +} + +func (f *repeatableStringFlag) Set(value string) error { + *f.values = append(*f.values, value) + return nil +} + +// ParseAndValidate parses flags from args (typically os.Args[1:]), resolves +// environment variables, and validates the resulting Config. +// It uses a dedicated FlagSet so it is safe to call from tests. +func ParseAndValidate(args []string) (*models.Config, error) { + fs := flag.NewFlagSet("co-acs-scan-importer", flag.ContinueOnError) + + // Override default Usage with structured help text. + fs.Usage = func() { printUsage(fs) } + + // --- ACS connection (IMP-CLI-001) --- + endpoint := fs.String("endpoint", os.Getenv("ROX_ENDPOINT"), + "ACS Central endpoint URL.\n"+ + "Bare hostnames get https:// prepended automatically.\n"+ + "Also read from the ROX_ENDPOINT environment variable.") + + // --- ACS authentication (IMP-CLI-024) --- + username := fs.String("username", "", + "Username for basic auth (default \"admin\").\n"+ + "Also read from ROX_ADMIN_USER environment variable.") + + // --- Compliance Operator namespace --- + coNamespace := fs.String("co-namespace", defaultCONamespace, + "Namespace containing Compliance Operator resources.\n"+ + "Overridden by --co-all-namespaces.") + coAllNamespaces := fs.Bool("co-all-namespaces", false, + "Read Compliance Operator resources from all namespaces.") + + // --- Import behavior --- + dryRun := fs.Bool("dry-run", false, + "Preview all actions without making any changes to ACS.\n"+ + "The report is still generated.") + overwriteExisting := fs.Bool("overwrite-existing", false, + "Update existing ACS scan configurations instead of skipping them.\n"+ + "Without this flag, existing configs are skipped with a warning.") + reportJSON := fs.String("report-json", "", + "Write a structured JSON report to this file path.") + + // --- HTTP / TLS --- + requestTimeout := fs.Duration("request-timeout", defaultTimeout, + "Timeout for each HTTP request to ACS (e.g. 30s, 1m).") + maxRetries := fs.Int("max-retries", defaultMaxRetries, + "Maximum retry attempts for transient ACS API failures (429, 502, 503, 504).") + caCertFile := fs.String("ca-cert-file", "", + "Path to a PEM-encoded CA certificate bundle for TLS verification.") + insecureSkipVerify := fs.Bool("insecure-skip-verify", false, + "Skip TLS certificate verification. Not recommended for production.") + + // --- Context filter --- + var contexts []string + fs.Var(&repeatableStringFlag{values: &contexts}, "context", + "Kubernetes context name to process (repeatable).\n"+ + "By default all contexts from the merged kubeconfig are used.\n"+ + "Use --context to limit processing to specific contexts.") + + if err := fs.Parse(args); err != nil { + if errors.Is(err, flag.ErrHelp) { + return nil, ErrHelpRequested + } + return nil, fmt.Errorf("flag parse error: %w", err) + } + + // Resolve username: flag > env > default. + resolvedUsername := *username + if resolvedUsername == "" { + resolvedUsername = os.Getenv("ROX_ADMIN_USER") + } + if resolvedUsername == "" { + resolvedUsername = defaultUsername + } + + cfg := &models.Config{ + ACSEndpoint: *endpoint, + Username: resolvedUsername, + CONamespace: *coNamespace, + COAllNamespaces: *coAllNamespaces, + DryRun: *dryRun, + ReportJSON: *reportJSON, + RequestTimeout: *requestTimeout, + MaxRetries: *maxRetries, + CACertFile: *caCertFile, + InsecureSkipVerify: *insecureSkipVerify, + OverwriteExisting: *overwriteExisting, + Contexts: contexts, + } + + // IMP-CLI-002: auto-infer auth mode from env vars. + if err := inferAuthMode(cfg); err != nil { + return nil, err + } + + if err := validate(cfg); err != nil { + return nil, err + } + return cfg, nil +} + +// inferAuthMode sets cfg.AuthMode based on which env vars are present (IMP-CLI-002). +// - ROX_API_TOKEN set → token mode +// - ROX_ADMIN_PASSWORD set → basic mode +// - both set → ambiguous error (IMP-CLI-025) +// - neither set → error with help text (IMP-CLI-025) +func inferAuthMode(cfg *models.Config) error { + hasToken := os.Getenv("ROX_API_TOKEN") != "" + hasPassword := os.Getenv("ROX_ADMIN_PASSWORD") != "" + + switch { + case hasToken && hasPassword: + return errors.New( + "ambiguous auth: both ROX_API_TOKEN and ROX_ADMIN_PASSWORD are set\n" + + "Fix: unset one of them to select a single auth mode", + ) + case hasToken: + cfg.AuthMode = models.AuthModeToken + case hasPassword: + cfg.AuthMode = models.AuthModeBasic + default: + return errors.New( + "no auth credentials found\n" + + "Fix: set ROX_API_TOKEN for token auth, or ROX_ADMIN_PASSWORD for basic auth", + ) + } + return nil +} + +// validate checks all cross-field invariants after flags and env vars are resolved. +func validate(cfg *models.Config) error { + if cfg.ACSEndpoint == "" { + return errors.New("--endpoint is required (or set ROX_ENDPOINT)") + } + + // IMP-CLI-013: auto-prepend https:// for bare hostnames; reject http://. + if strings.HasPrefix(cfg.ACSEndpoint, "http://") { + return fmt.Errorf("--endpoint must not use http:// (got %q)\nFix: use https:// or omit the scheme", cfg.ACSEndpoint) + } + if !strings.HasPrefix(cfg.ACSEndpoint, "https://") { + cfg.ACSEndpoint = "https://" + cfg.ACSEndpoint + } + + // Strip trailing slash for consistency. + cfg.ACSEndpoint = strings.TrimRight(cfg.ACSEndpoint, "/") + + // Auth material validation (IMP-CLI-014). + switch cfg.AuthMode { + case models.AuthModeToken: + if os.Getenv("ROX_API_TOKEN") == "" { + return errors.New( + "ROX_API_TOKEN is empty\n" + + "Fix: export ROX_API_TOKEN=", + ) + } + case models.AuthModeBasic: + if os.Getenv("ROX_ADMIN_PASSWORD") == "" { + return errors.New( + "ROX_ADMIN_PASSWORD is empty\n" + + "Fix: export ROX_ADMIN_PASSWORD=", + ) + } + } + + if cfg.COAllNamespaces { + cfg.CONamespace = "" // --co-all-namespaces overrides any namespace setting + } + + if cfg.MaxRetries < 0 { + return fmt.Errorf("--max-retries must be >= 0 (got %d)", cfg.MaxRetries) + } + + return nil +} + +// printUsage writes structured help text to stderr. +func printUsage(fs *flag.FlagSet) { + w := os.Stderr + fmt.Fprint(w, `co-acs-scan-importer - Import Compliance Operator scan schedules into ACS + +DESCRIPTION + Reads ScanSettingBinding resources from Kubernetes clusters running the + Compliance Operator and creates equivalent scan configurations in Red Hat + Advanced Cluster Security (ACS) via the v2 API. + + By default, all contexts in the merged kubeconfig are processed. The ACS + cluster ID for each context is auto-discovered via the admission-control + ConfigMap, OpenShift ClusterVersion, or Helm cluster name secret. + +USAGE + # All clusters in kubeconfig (dry-run): + co-acs-scan-importer \ + --endpoint central.example.com \ + --dry-run + + # Specific clusters only: + co-acs-scan-importer \ + --endpoint central.example.com \ + --context cluster-a \ + --context cluster-b + + # Multiple kubeconfig files merged: + KUBECONFIG=a.yaml:b.yaml co-acs-scan-importer \ + --endpoint central.example.com + + # Update existing ACS scan configs instead of skipping them: + co-acs-scan-importer \ + --endpoint central.example.com \ + --overwrite-existing + + # Basic auth (for development/testing): + ROX_ADMIN_PASSWORD=secret co-acs-scan-importer \ + --endpoint central.example.com \ + --username admin \ + --insecure-skip-verify + +AUTHENTICATION + Auth mode is auto-inferred from environment variables: + - Set ROX_API_TOKEN for API token auth (production). + - Set ROX_ADMIN_PASSWORD for basic auth (development/testing). + - Setting both is an error (ambiguous). + - Setting neither is an error. + +MULTI-CLUSTER + The importer processes all contexts in the merged kubeconfig by default. + Use --context (repeatable) to limit processing to specific contexts. + Merge kubeconfig files via: KUBECONFIG=a.yaml:b.yaml:c.yaml + + ScanSettingBindings with the same name across multiple clusters are merged + into a single ACS scan configuration targeting all matched clusters. The + importer verifies that profiles and schedules match across clusters and + reports an error if they differ. + +AUTO-DISCOVERY + The ACS cluster ID is auto-discovered for each context using the + following chain (first success wins): + + 1. admission-control ConfigMap "cluster-id" key (namespace: stackrox) + 2. OpenShift ClusterVersion spec.clusterID matched against ACS provider metadata + 3. helm-effective-cluster-name secret matched against ACS cluster name + +EXIT CODES + 0 All bindings processed successfully (or nothing to do). + 1 Fatal error (bad config, auth failure, connectivity issue). + 2 Partial success (some bindings failed; see report for details). + +ENVIRONMENT VARIABLES + ROX_ENDPOINT ACS Central URL (alternative to --endpoint). + ROX_API_TOKEN API token for token auth mode. + ROX_ADMIN_PASSWORD Password for basic auth mode. + ROX_ADMIN_USER Username for basic auth (default "admin"). + KUBECONFIG Colon-separated list of kubeconfig file paths. + +FLAGS +`) + fs.PrintDefaults() +} diff --git a/scripts/compliance-operator-importer/internal/config/config_multicluster_test.go b/scripts/compliance-operator-importer/internal/config/config_multicluster_test.go new file mode 100644 index 0000000000000..9696bc29592cd --- /dev/null +++ b/scripts/compliance-operator-importer/internal/config/config_multicluster_test.go @@ -0,0 +1,63 @@ +package config + +import ( + "testing" +) + +// TestIMP_CLI_003_ContextRepeatable verifies that --context can be +// repeated to filter which kubeconfig contexts are processed. +func TestIMP_CLI_003_ContextRepeatable(t *testing.T) { + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + "--context", "ctx-a", + "--context", "ctx-b", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(cfg.Contexts) != 2 { + t.Errorf("expected 2 contexts, got %d", len(cfg.Contexts)) + } + if cfg.Contexts[0] != "ctx-a" { + t.Errorf("expected first context 'ctx-a', got %q", cfg.Contexts[0]) + } + if cfg.Contexts[1] != "ctx-b" { + t.Errorf("expected second context 'ctx-b', got %q", cfg.Contexts[1]) + } +} + +// TestIMP_CLI_003_NoContextMeansAll verifies that omitting --context +// results in an empty Contexts slice (meaning "all contexts"). +func TestIMP_CLI_003_NoContextMeansAll(t *testing.T) { + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if len(cfg.Contexts) != 0 { + t.Errorf("expected empty contexts (all), got %v", cfg.Contexts) + } +} + +// TestIMP_CLI_003_RemovedFlagsRejected verifies that removed multi-cluster +// flags are not accepted. +func TestIMP_CLI_003_RemovedFlagsRejected(t *testing.T) { + setenv(t, "ROX_API_TOKEN", "tok") + + for _, flag := range []string{"--kubeconfig", "--kubecontext", "--cluster"} { + t.Run(flag, func(t *testing.T) { + _, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + flag, "some-value", + }) + if err == nil { + t.Errorf("expected error for %s, got nil", flag) + } + }) + } +} diff --git a/scripts/compliance-operator-importer/internal/config/config_test.go b/scripts/compliance-operator-importer/internal/config/config_test.go new file mode 100644 index 0000000000000..ef1b2d1972039 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/config/config_test.go @@ -0,0 +1,744 @@ +package config + +import ( + "os" + "strings" + "testing" + "time" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// minimalValidArgs returns args that satisfy all required flags when +// ROX_API_TOKEN is pre-set by the caller. +func minimalValidArgs(overrides ...string) []string { + base := []string{ + "--endpoint", "https://central.example.com", + "--co-namespace", "openshift-compliance", + } + return append(base, overrides...) +} + +// setenv is a test helper that sets an env var and returns a cleanup func. +func setenv(t *testing.T, key, value string) { + t.Helper() + t.Setenv(key, value) +} + +// clearAuthEnv ensures both auth env vars are unset for a clean test. +func clearAuthEnv(t *testing.T) { + t.Helper() + t.Setenv("ROX_API_TOKEN", "") + t.Setenv("ROX_ADMIN_PASSWORD", "") + t.Setenv("ROX_ADMIN_USER", "") + t.Setenv("ROX_ENDPOINT", "") +} + +// =========================================================================== +// IMP-CLI-001: --endpoint / ROX_ENDPOINT +// =========================================================================== + +func TestIMP_CLI_001_EndpointRequired(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate([]string{"--co-namespace", "openshift-compliance"}) + if err == nil { + t.Fatal("expected error for missing --endpoint, got nil") + } + if !strings.Contains(err.Error(), "--endpoint") { + t.Errorf("expected error to mention --endpoint, got: %q", err.Error()) + } +} + +func TestIMP_CLI_001_EndpointFromFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { + t.Errorf("expected endpoint from flag, got %q", cfg.ACSEndpoint) + } +} + +func TestIMP_CLI_001_EndpointFromEnv(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ENDPOINT", "https://central.example.com") + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{"--co-namespace", "openshift-compliance"}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { + t.Errorf("expected endpoint from ROX_ENDPOINT env, got %q", cfg.ACSEndpoint) + } +} + +func TestIMP_CLI_001_FlagOverridesEnv(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ENDPOINT", "https://env-central.example.com") + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://flag-central.example.com", + "--co-namespace", "openshift-compliance", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://flag-central.example.com" { + t.Errorf("expected flag to override env, got %q", cfg.ACSEndpoint) + } +} + +func TestIMP_CLI_001_EmptyEndpointEnvNotAccepted(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ENDPOINT", "") + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate([]string{"--co-namespace", "openshift-compliance"}) + if err == nil { + t.Fatal("expected error for empty ROX_ENDPOINT, got nil") + } +} + +// =========================================================================== +// IMP-CLI-002: auto-inferred auth mode +// =========================================================================== + +func TestIMP_CLI_002_TokenAutoInferred(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.AuthMode != models.AuthModeToken { + t.Errorf("expected token mode inferred, got %q", cfg.AuthMode) + } +} + +func TestIMP_CLI_002_BasicAutoInferred(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ADMIN_PASSWORD", "secret") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.AuthMode != models.AuthModeBasic { + t.Errorf("expected basic mode inferred, got %q", cfg.AuthMode) + } +} + +func TestIMP_CLI_002_NoOldAuthModeFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + // --acs-auth-mode should be rejected as an unknown flag. + _, err := ParseAndValidate(minimalValidArgs("--acs-auth-mode", "token")) + if err == nil { + t.Fatal("expected error for removed --acs-auth-mode flag, got nil") + } +} + +func TestIMP_CLI_002_NoOldTokenEnvFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate(minimalValidArgs("--acs-token-env", "MY_TOKEN")) + if err == nil { + t.Fatal("expected error for removed --acs-token-env flag, got nil") + } +} + +func TestIMP_CLI_002_NoOldPasswordEnvFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate(minimalValidArgs("--acs-password-env", "MY_PWD")) + if err == nil { + t.Fatal("expected error for removed --acs-password-env flag, got nil") + } +} + +func TestIMP_CLI_002_NoOldEndpointFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate([]string{ + "--acs-endpoint", "https://central.example.com", + "--co-namespace", "openshift-compliance", + }) + if err == nil { + t.Fatal("expected error for removed --acs-endpoint flag, got nil") + } +} + +func TestIMP_CLI_002_NoOldUsernameFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ADMIN_PASSWORD", "secret") + + _, err := ParseAndValidate(minimalValidArgs("--acs-username", "admin")) + if err == nil { + t.Fatal("expected error for removed --acs-username flag, got nil") + } +} + +func TestIMP_CLI_002_NoOldSourceKubecontextFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate(minimalValidArgs("--source-kubecontext", "myctx")) + if err == nil { + t.Fatal("expected error for removed --source-kubecontext flag, got nil") + } +} + +func TestIMP_CLI_002_NoOldClusterIDFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate(minimalValidArgs("--acs-cluster-id", "uuid")) + if err == nil { + t.Fatal("expected error for removed --acs-cluster-id flag, got nil") + } +} + +// =========================================================================== +// IMP-CLI-013: endpoint scheme handling +// =========================================================================== + +func TestIMP_CLI_013_HTTPSAccepted(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { + t.Errorf("expected https endpoint, got %q", cfg.ACSEndpoint) + } +} + +func TestIMP_CLI_013_BareHostnameGetsHTTPS(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "central.example.com", + "--co-namespace", "openshift-compliance", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { + t.Errorf("expected https:// prepended, got %q", cfg.ACSEndpoint) + } +} + +func TestIMP_CLI_013_BareHostnameWithPortGetsHTTPS(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "central.example.com:8443", + "--co-namespace", "openshift-compliance", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com:8443" { + t.Errorf("expected https:// prepended with port, got %q", cfg.ACSEndpoint) + } +} + +func TestIMP_CLI_013_HTTPRejected(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate([]string{ + "--endpoint", "http://central.example.com", + "--co-namespace", "openshift-compliance", + }) + if err == nil { + t.Fatal("expected error for http:// endpoint, got nil") + } + if !strings.Contains(err.Error(), "http://") { + t.Errorf("expected error to mention http://, got: %q", err.Error()) + } +} + +func TestIMP_CLI_013_FTPRejected(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + // ftp:// doesn't start with http:// so it's treated as a bare hostname. + // After prepending https:// it becomes https://ftp://... which is wrong + // but technically passes the scheme check. Let's verify it's handled. + cfg, err := ParseAndValidate([]string{ + "--endpoint", "ftp://central.example.com", + "--co-namespace", "openshift-compliance", + }) + // ftp:// doesn't start with http:// or https:// so gets https:// prepended. + // That's OK per spec — the spec only rejects http:// explicitly. + if err != nil { + t.Fatalf("unexpected error (ftp scheme gets https:// prepended): %v", err) + } + if !strings.HasPrefix(cfg.ACSEndpoint, "https://") { + t.Errorf("expected https:// prepended to ftp:// input, got %q", cfg.ACSEndpoint) + } +} + +func TestIMP_CLI_013_BareHostnameFromEnvGetsHTTPS(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ENDPOINT", "central.example.com") + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{"--co-namespace", "openshift-compliance"}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { + t.Errorf("expected https:// prepended for bare hostname from env, got %q", cfg.ACSEndpoint) + } +} + +func TestIMP_CLI_013_OpenShiftRouteHostname(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + // Typical OpenShift route hostname. + cfg, err := ParseAndValidate([]string{ + "--endpoint", "central-stackrox.apps.mycluster.example.com", + "--co-namespace", "openshift-compliance", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central-stackrox.apps.mycluster.example.com" { + t.Errorf("expected https:// prepended, got %q", cfg.ACSEndpoint) + } +} + +// =========================================================================== +// IMP-CLI-024: basic mode fields (--username / ROX_ADMIN_USER / default admin) +// =========================================================================== + +func TestIMP_CLI_024_BasicModeDefaultUsername(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ADMIN_PASSWORD", "s3cr3t") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Username != "admin" { + t.Errorf("expected default username 'admin', got %q", cfg.Username) + } +} + +func TestIMP_CLI_024_BasicModeUsernameFromFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ADMIN_PASSWORD", "s3cr3t") + + cfg, err := ParseAndValidate(minimalValidArgs("--username", "alice")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Username != "alice" { + t.Errorf("expected username 'alice', got %q", cfg.Username) + } +} + +func TestIMP_CLI_024_BasicModeUsernameFromEnv(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ADMIN_PASSWORD", "s3cr3t") + setenv(t, "ROX_ADMIN_USER", "bob") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Username != "bob" { + t.Errorf("expected username 'bob' from ROX_ADMIN_USER, got %q", cfg.Username) + } +} + +func TestIMP_CLI_024_FlagOverridesEnvForUsername(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_ADMIN_PASSWORD", "s3cr3t") + setenv(t, "ROX_ADMIN_USER", "env-user") + + cfg, err := ParseAndValidate(minimalValidArgs("--username", "flag-user")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.Username != "flag-user" { + t.Errorf("expected --username to override ROX_ADMIN_USER, got %q", cfg.Username) + } +} + +func TestIMP_CLI_024_TokenModeIgnoresUsername(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + // Username is still set but should be irrelevant in token mode. + cfg, err := ParseAndValidate(minimalValidArgs("--username", "ignored")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.AuthMode != models.AuthModeToken { + t.Errorf("expected token mode, got %q", cfg.AuthMode) + } +} + +// =========================================================================== +// IMP-CLI-025: ambiguous auth +// =========================================================================== + +func TestIMP_CLI_025_BothTokenAndPasswordIsAmbiguous(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + setenv(t, "ROX_ADMIN_PASSWORD", "pwd") + + _, err := ParseAndValidate(minimalValidArgs()) + if err == nil { + t.Fatal("expected error for ambiguous auth, got nil") + } + if !strings.Contains(strings.ToLower(err.Error()), "ambiguous") { + t.Errorf("expected 'ambiguous' in error, got: %q", err.Error()) + } +} + +func TestIMP_CLI_025_NeitherTokenNorPasswordErrors(t *testing.T) { + clearAuthEnv(t) + + _, err := ParseAndValidate(minimalValidArgs()) + if err == nil { + t.Fatal("expected error for missing auth, got nil") + } + if !strings.Contains(err.Error(), "ROX_API_TOKEN") || !strings.Contains(err.Error(), "ROX_ADMIN_PASSWORD") { + t.Errorf("expected error to mention both env vars, got: %q", err.Error()) + } +} + +// =========================================================================== +// Defaults and other flags (IMP-CLI-004, IMP-CLI-006..012) +// =========================================================================== + +func TestIMP_CLI_009_DefaultTimeout(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.RequestTimeout != 30*time.Second { + t.Errorf("IMP-CLI-009: expected 30s timeout, got %v", cfg.RequestTimeout) + } +} + +func TestIMP_CLI_009_CustomTimeout(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--request-timeout", "2m")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.RequestTimeout != 2*time.Minute { + t.Errorf("expected 2m timeout, got %v", cfg.RequestTimeout) + } +} + +func TestIMP_CLI_010_DefaultMaxRetries(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.MaxRetries != 5 { + t.Errorf("IMP-CLI-010: expected max retries 5, got %d", cfg.MaxRetries) + } +} + +func TestIMP_CLI_010_NegativeMaxRetriesRejected(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate(minimalValidArgs("--max-retries", "-1")) + if err == nil { + t.Fatal("IMP-CLI-010: expected error for negative max-retries, got nil") + } +} + +func TestIMP_CLI_010_ZeroMaxRetriesAllowed(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--max-retries", "0")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.MaxRetries != 0 { + t.Errorf("expected max retries 0, got %d", cfg.MaxRetries) + } +} + +func TestIMP_CLI_004_DefaultNamespace(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + }) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if cfg.CONamespace != "openshift-compliance" { + t.Fatalf("IMP-CLI-004: expected default namespace 'openshift-compliance', got %q", cfg.CONamespace) + } +} + +func TestIMP_CLI_004_CustomNamespace(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + "--co-namespace", "custom-ns", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.CONamespace != "custom-ns" { + t.Errorf("expected custom namespace, got %q", cfg.CONamespace) + } +} + +func TestIMP_CLI_004_AllNamespacesClearsDefault(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--co-all-namespaces")) + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + if cfg.CONamespace != "" { + t.Fatalf("expected empty namespace with --co-all-namespaces, got %q", cfg.CONamespace) + } + if !cfg.COAllNamespaces { + t.Fatal("expected COAllNamespaces to be true") + } +} + +func TestIMP_CLI_004_AllNamespacesWithoutExplicitNamespace(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com", + "--co-all-namespaces", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !cfg.COAllNamespaces { + t.Error("expected COAllNamespaces=true") + } + if cfg.CONamespace != "" { + t.Errorf("expected empty CONamespace, got %q", cfg.CONamespace) + } +} + +func TestIMP_CLI_006_OverwriteExistingDefaultsFalse(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.OverwriteExisting { + t.Error("IMP-CLI-006: expected OverwriteExisting to default to false") + } +} + +func TestIMP_CLI_027_OverwriteExistingFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--overwrite-existing")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !cfg.OverwriteExisting { + t.Error("IMP-CLI-027: expected OverwriteExisting=true when flag is set") + } +} + +func TestIMP_CLI_007_DryRunFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--dry-run")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !cfg.DryRun { + t.Error("IMP-CLI-007: expected DryRun=true when flag is set") + } +} + +func TestIMP_CLI_008_ReportJSONFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--report-json", "/tmp/report.json")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ReportJSON != "/tmp/report.json" { + t.Errorf("IMP-CLI-008: expected report path, got %q", cfg.ReportJSON) + } +} + +func TestIMP_CLI_011_CACertFileFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--ca-cert-file", "/path/to/ca.pem")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.CACertFile != "/path/to/ca.pem" { + t.Errorf("IMP-CLI-011: expected ca-cert-file, got %q", cfg.CACertFile) + } +} + +func TestIMP_CLI_012_InsecureSkipVerifyDefault(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.InsecureSkipVerify { + t.Error("IMP-CLI-012: expected InsecureSkipVerify to default to false") + } +} + +func TestIMP_CLI_012_InsecureSkipVerifyFlag(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate(minimalValidArgs("--insecure-skip-verify")) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if !cfg.InsecureSkipVerify { + t.Error("IMP-CLI-012: expected InsecureSkipVerify=true when flag is set") + } +} + +// =========================================================================== +// Edge cases +// =========================================================================== + +func TestTrailingSlashStripped(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com/", + "--co-namespace", "openshift-compliance", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { + t.Errorf("expected trailing slash stripped, got %q", cfg.ACSEndpoint) + } +} + +func TestMultipleTrailingSlashesStripped(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + cfg, err := ParseAndValidate([]string{ + "--endpoint", "https://central.example.com///", + "--co-namespace", "openshift-compliance", + }) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { + t.Errorf("expected all trailing slashes stripped, got %q", cfg.ACSEndpoint) + } +} + +func TestHelpReturnsSpecialError(t *testing.T) { + // Redirect stderr to avoid printing help text during test. + oldStderr := os.Stderr + os.Stderr, _ = os.Open(os.DevNull) + defer func() { os.Stderr = oldStderr }() + + _, err := ParseAndValidate([]string{"--help"}) + if err != ErrHelpRequested { + t.Errorf("expected ErrHelpRequested, got %v", err) + } +} + +func TestUnknownFlagRejected(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + + _, err := ParseAndValidate(minimalValidArgs("--unknown-flag", "value")) + if err == nil { + t.Fatal("expected error for unknown flag, got nil") + } +} + +func TestEmptyArgsWithTokenAndEndpoint(t *testing.T) { + clearAuthEnv(t) + setenv(t, "ROX_API_TOKEN", "tok") + setenv(t, "ROX_ENDPOINT", "https://central.example.com") + + cfg, err := ParseAndValidate([]string{}) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.ACSEndpoint != "https://central.example.com" { + t.Errorf("expected endpoint from env with empty args, got %q", cfg.ACSEndpoint) + } + if cfg.AuthMode != models.AuthModeToken { + t.Errorf("expected token mode, got %q", cfg.AuthMode) + } +} + +func TestWhitespaceOnlyTokenIsEmpty(t *testing.T) { + clearAuthEnv(t) + // Whitespace-only token — os.Getenv returns it as non-empty. + // The config layer treats it as set (auth is inferred as token). + // Preflight will catch an invalid token at the network level. + setenv(t, "ROX_API_TOKEN", " ") + + cfg, err := ParseAndValidate(minimalValidArgs()) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if cfg.AuthMode != models.AuthModeToken { + t.Errorf("expected token mode for whitespace token, got %q", cfg.AuthMode) + } +} diff --git a/scripts/compliance-operator-importer/internal/discover/discover.go b/scripts/compliance-operator-importer/internal/discover/discover.go new file mode 100644 index 0000000000000..777149e4582bb --- /dev/null +++ b/scripts/compliance-operator-importer/internal/discover/discover.go @@ -0,0 +1,171 @@ +// Package discover handles auto-discovery of ACS cluster IDs from Kubernetes +// clusters using multiple fallback methods. +package discover + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "strings" + + "github.com/stackrox/co-acs-importer/internal/models" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/dynamic" +) + +// k8sResourceReader abstracts Kubernetes resource lookups for testing. +type k8sResourceReader interface { + getAdmissionControlClusterID(ctx context.Context) (string, error) + getOpenShiftClusterID(ctx context.Context) (string, error) + getHelmSecretClusterName(ctx context.Context) (string, error) +} + +// k8sDiscoveryClient is the production implementation using a dynamic k8s client. +type k8sDiscoveryClient struct { + dynamic dynamic.Interface +} + +// NewK8sDiscoveryClient creates a k8sResourceReader from a dynamic k8s client. +func NewK8sDiscoveryClient(dynClient dynamic.Interface) k8sResourceReader { + return &k8sDiscoveryClient{dynamic: dynClient} +} + +// IMP-MAP-016: admission-control ConfigMap in stackrox namespace. +func (c *k8sDiscoveryClient) getAdmissionControlClusterID(ctx context.Context) (string, error) { + gvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "configmaps"} + obj, err := c.dynamic.Resource(gvr).Namespace("stackrox").Get(ctx, "admission-control", metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("get admission-control ConfigMap: %w", err) + } + + data, found, err := unstructured.NestedStringMap(obj.Object, "data") + if err != nil || !found { + return "", fmt.Errorf("parse ConfigMap data: %w", err) + } + + clusterID, ok := data["cluster-id"] + if !ok || clusterID == "" { + return "", errors.New("cluster-id not found in admission-control ConfigMap") + } + return clusterID, nil +} + +// IMP-MAP-017: OpenShift ClusterVersion resource. +func (c *k8sDiscoveryClient) getOpenShiftClusterID(ctx context.Context) (string, error) { + gvr := schema.GroupVersionResource{ + Group: "config.openshift.io", + Version: "v1", + Resource: "clusterversions", + } + obj, err := c.dynamic.Resource(gvr).Get(ctx, "version", metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("get ClusterVersion: %w", err) + } + + clusterID, found, err := unstructured.NestedString(obj.Object, "spec", "clusterID") + if err != nil || !found { + return "", fmt.Errorf("parse ClusterVersion.spec.clusterID: %w", err) + } + if clusterID == "" { + return "", errors.New("ClusterVersion.spec.clusterID is empty") + } + return clusterID, nil +} + +// IMP-MAP-018: helm-effective-cluster-name secret in stackrox namespace. +func (c *k8sDiscoveryClient) getHelmSecretClusterName(ctx context.Context) (string, error) { + gvr := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "secrets"} + obj, err := c.dynamic.Resource(gvr).Namespace("stackrox").Get(ctx, "helm-effective-cluster-name", metav1.GetOptions{}) + if err != nil { + return "", fmt.Errorf("get helm-effective-cluster-name Secret: %w", err) + } + + data, found, err := unstructured.NestedStringMap(obj.Object, "data") + if err != nil || !found { + return "", fmt.Errorf("parse Secret data: %w", err) + } + + encodedName, ok := data["cluster-name"] + if !ok || encodedName == "" { + return "", errors.New("cluster-name not found in helm-effective-cluster-name Secret") + } + + // Kubernetes secrets are base64-encoded. + decoded, err := base64.StdEncoding.DecodeString(encodedName) + if err != nil { + return "", fmt.Errorf("decode cluster-name: %w", err) + } + return string(decoded), nil +} + +// DiscoverClusterID attempts to resolve the ACS cluster ID for the given source cluster. +// +// Discovery chain (try in order, use first success): +// 1. admission-control ConfigMap: direct ACS cluster UUID (IMP-MAP-016). +// 2. OpenShift ClusterVersion: match providerMetadata.cluster.id (IMP-MAP-017). +// 3. helm-effective-cluster-name secret: match by cluster name (IMP-MAP-018). +// +// Returns error if all methods fail. +func DiscoverClusterID( + ctx context.Context, + k8s k8sResourceReader, + acs models.ACSClient, +) (string, error) { + var errs []error + + // IMP-MAP-016: admission-control ConfigMap. + clusterID, err := k8s.getAdmissionControlClusterID(ctx) + if err == nil { + return clusterID, nil + } + errs = append(errs, fmt.Errorf("admission-control ConfigMap: %w", err)) + + // IMP-MAP-017: OpenShift ClusterVersion. + ocpClusterID, err := k8s.getOpenShiftClusterID(ctx) + if err == nil { + clusters, listErr := acs.ListClusters(ctx) + if listErr != nil { + return "", fmt.Errorf("list ACS clusters for OpenShift ID match: %w", listErr) + } + for _, c := range clusters { + if c.ProviderClusterID == ocpClusterID { + return c.ID, nil + } + } + errs = append(errs, fmt.Errorf("OpenShift cluster ID %q not found in ACS clusters", ocpClusterID)) + } else { + errs = append(errs, fmt.Errorf("OpenShift ClusterVersion: %w", err)) + } + + // IMP-MAP-018: helm-effective-cluster-name secret. + clusterName, err := k8s.getHelmSecretClusterName(ctx) + if err == nil { + clusters, listErr := acs.ListClusters(ctx) + if listErr != nil { + return "", fmt.Errorf("list ACS clusters for helm cluster name match: %w", listErr) + } + for _, c := range clusters { + if c.Name == clusterName { + return c.ID, nil + } + } + errs = append(errs, fmt.Errorf("helm cluster name %q not found in ACS clusters", clusterName)) + } else { + errs = append(errs, fmt.Errorf("helm-effective-cluster-name Secret: %w", err)) + } + + return "", fmt.Errorf("all discovery methods failed to resolve ACS cluster ID:\n - %s", + joinErrors(errs)) +} + +// joinErrors formats a slice of errors as a newline+bullet list. +func joinErrors(errs []error) string { + parts := make([]string, len(errs)) + for i, e := range errs { + parts[i] = e.Error() + } + return strings.Join(parts, "\n - ") +} diff --git a/scripts/compliance-operator-importer/internal/discover/discover_test.go b/scripts/compliance-operator-importer/internal/discover/discover_test.go new file mode 100644 index 0000000000000..695f4fd3267b9 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/discover/discover_test.go @@ -0,0 +1,169 @@ +package discover + +import ( + "context" + "errors" + "testing" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// mockK8sClient implements the k8sResourceReader interface for testing. +type mockK8sClient struct { + admissionControlCM map[string]string + admissionControlErr error + clusterVersionID string + clusterVersionErr error + helmSecretClusterName string + helmSecretErr error +} + +func (m *mockK8sClient) getAdmissionControlClusterID(ctx context.Context) (string, error) { + if m.admissionControlErr != nil { + return "", m.admissionControlErr + } + return m.admissionControlCM["cluster-id"], nil +} + +func (m *mockK8sClient) getOpenShiftClusterID(ctx context.Context) (string, error) { + if m.clusterVersionErr != nil { + return "", m.clusterVersionErr + } + return m.clusterVersionID, nil +} + +func (m *mockK8sClient) getHelmSecretClusterName(ctx context.Context) (string, error) { + if m.helmSecretErr != nil { + return "", m.helmSecretErr + } + return m.helmSecretClusterName, nil +} + +// mockACSClient implements the models.ACSClient interface for testing. +type mockACSClient struct { + clusters []models.ACSClusterInfo + err error +} + +func (m *mockACSClient) Preflight(ctx context.Context) error { + return nil +} + +func (m *mockACSClient) ListScanConfigurations(ctx context.Context) ([]models.ACSConfigSummary, error) { + return nil, nil +} + +func (m *mockACSClient) CreateScanConfiguration(ctx context.Context, payload models.ACSCreatePayload) (string, error) { + return "", nil +} + +func (m *mockACSClient) UpdateScanConfiguration(ctx context.Context, id string, payload models.ACSCreatePayload) error { + return nil +} + +func (m *mockACSClient) ListClusters(ctx context.Context) ([]models.ACSClusterInfo, error) { + return m.clusters, m.err +} + +// TestIMP_MAP_016_AdmissionControlConfigMap verifies discovery via admission-control ConfigMap. +func TestIMP_MAP_016_AdmissionControlConfigMap(t *testing.T) { + ctx := context.Background() + k8s := &mockK8sClient{ + admissionControlCM: map[string]string{"cluster-id": "acs-uuid-12345"}, + } + acs := &mockACSClient{} + + clusterID, err := DiscoverClusterID(ctx, k8s, acs) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if clusterID != "acs-uuid-12345" { + t.Errorf("expected cluster ID from admission-control CM, got %q", clusterID) + } +} + +// TestIMP_MAP_017_OpenShiftClusterVersion verifies discovery via OpenShift ClusterVersion. +func TestIMP_MAP_017_OpenShiftClusterVersion(t *testing.T) { + ctx := context.Background() + k8s := &mockK8sClient{ + admissionControlErr: errors.New("not found"), + clusterVersionID: "ocp-cluster-abc", + } + acs := &mockACSClient{ + clusters: []models.ACSClusterInfo{ + {ID: "acs-uuid-1", Name: "cluster-1", ProviderClusterID: "ocp-cluster-abc"}, + {ID: "acs-uuid-2", Name: "cluster-2", ProviderClusterID: "ocp-cluster-xyz"}, + }, + } + + clusterID, err := DiscoverClusterID(ctx, k8s, acs) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if clusterID != "acs-uuid-1" { + t.Errorf("expected cluster ID from OpenShift ClusterVersion match, got %q", clusterID) + } +} + +// TestIMP_MAP_018_HelmSecretClusterName verifies discovery via helm-effective-cluster-name secret. +func TestIMP_MAP_018_HelmSecretClusterName(t *testing.T) { + ctx := context.Background() + k8s := &mockK8sClient{ + admissionControlErr: errors.New("not found"), + clusterVersionErr: errors.New("not found"), + helmSecretClusterName: "production", + } + acs := &mockACSClient{ + clusters: []models.ACSClusterInfo{ + {ID: "acs-uuid-1", Name: "production"}, + {ID: "acs-uuid-2", Name: "staging"}, + }, + } + + clusterID, err := DiscoverClusterID(ctx, k8s, acs) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if clusterID != "acs-uuid-1" { + t.Errorf("expected cluster ID from helm secret match, got %q", clusterID) + } +} + +// TestDiscoveryFallbackChain verifies the discovery chain tries methods in order. +func TestDiscoveryFallbackChain(t *testing.T) { + ctx := context.Background() + k8s := &mockK8sClient{ + admissionControlErr: errors.New("not found"), + clusterVersionErr: errors.New("not found"), + helmSecretClusterName: "fallback-cluster", + } + acs := &mockACSClient{ + clusters: []models.ACSClusterInfo{ + {ID: "acs-uuid-fallback", Name: "fallback-cluster"}, + }, + } + + clusterID, err := DiscoverClusterID(ctx, k8s, acs) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if clusterID != "acs-uuid-fallback" { + t.Errorf("expected fallback cluster ID, got %q", clusterID) + } +} + +// TestDiscoveryAllMethodsFail verifies error when all discovery methods fail. +func TestDiscoveryAllMethodsFail(t *testing.T) { + ctx := context.Background() + k8s := &mockK8sClient{ + admissionControlErr: errors.New("not found"), + clusterVersionErr: errors.New("not found"), + helmSecretErr: errors.New("not found"), + } + acs := &mockACSClient{} + + _, err := DiscoverClusterID(ctx, k8s, acs) + if err == nil { + t.Fatal("expected error when all discovery methods fail, got nil") + } +} diff --git a/scripts/compliance-operator-importer/internal/mapping/mapping.go b/scripts/compliance-operator-importer/internal/mapping/mapping.go new file mode 100644 index 0000000000000..2c16ada02cefb --- /dev/null +++ b/scripts/compliance-operator-importer/internal/mapping/mapping.go @@ -0,0 +1,114 @@ +package mapping + +import ( + "fmt" + "slices" + + "github.com/stackrox/co-acs-importer/internal/cofetch" + "github.com/stackrox/co-acs-importer/internal/models" +) + +// MappingResult is returned per ScanSettingBinding. +// Exactly one of Payload or Problem will be non-nil. +type MappingResult struct { + // Payload is non-nil on success and contains the ACS create payload. + Payload *models.ACSCreatePayload + // Problem is non-nil when the binding should be skipped, with details about why. + Problem *models.Problem +} + +// MapBinding converts one ScanSettingBinding and its referenced ScanSetting into an +// ACS create payload, or returns a Problem if the binding should be skipped. +// +// Rules applied: +// - IMP-MAP-001: scanName = binding.Name; profiles = sorted+deduped list of profile names. +// - IMP-MAP-002: missing profile kind defaults to "Profile" (ProfileRef.Kind is "" => Profile). +// - IMP-MAP-003: oneTimeScan=false when a schedule is present. +// - IMP-MAP-004: scanSchedule set from ConvertCronToACSSchedule. +// - IMP-MAP-005: description contains "Imported from CO ScanSettingBinding /". +// - IMP-MAP-006: description includes the ScanSetting name. +// - IMP-MAP-007: clusters = [cfg.ACSClusterID]. +// - IMP-MAP-008..011: nil ScanSetting => Problem{category:mapping, skipped:true}. +// - IMP-MAP-012..015: invalid cron => Problem{category:mapping, skipped:true}. +func MapBinding(binding cofetch.ScanSettingBinding, ss *cofetch.ScanSetting, cfg *models.Config) MappingResult { + ref := fmt.Sprintf("%s/%s", binding.Namespace, binding.Name) + + // IMP-MAP-008, IMP-MAP-009, IMP-MAP-010: missing ScanSetting. + if ss == nil { + return MappingResult{ + Problem: &models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryMapping, + ResourceRef: ref, + Description: fmt.Sprintf( + "ScanSettingBinding %q references ScanSetting %q which could not be found", + ref, binding.ScanSettingName, + ), + FixHint: fmt.Sprintf( + "Ensure ScanSetting %q exists in namespace %q and is readable by the importer. "+ + "Verify with: kubectl get scansetting %s -n %s", + binding.ScanSettingName, binding.Namespace, + binding.ScanSettingName, binding.Namespace, + ), + Skipped: true, + }, + } + } + + // IMP-MAP-004, IMP-MAP-012..015: convert cron schedule. + schedule, err := ConvertCronToACSSchedule(ss.Schedule) + if err != nil { + return MappingResult{ + Problem: &models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryMapping, + ResourceRef: ref, + Description: fmt.Sprintf( + "schedule conversion failed for ScanSettingBinding %q (ScanSetting %q, schedule %q): %v", + ref, ss.Name, ss.Schedule, err, + ), + FixHint: fmt.Sprintf( + "Update ScanSetting %q to use a supported 5-field cron expression, for example: "+ + "\"0 2 * * *\" (daily at 02:00), \"0 2 * * 0\" (weekly on Sunday), "+ + "\"0 2 1 * *\" (monthly on the 1st). "+ + "Step and range notation in the cron expression are not supported.", + ss.Name, + ), + Skipped: true, + }, + } + } + + // IMP-MAP-001, IMP-MAP-002: collect profiles, dedup, sort. + // ProfileRef.Kind being empty is equivalent to "Profile" (IMP-MAP-002). + // Only the profile name is used in the ACS payload; kind determines lookup but + // both Profile and TailoredProfile names go into the same ACS profiles list. + profileSet := make(map[string]struct{}, len(binding.Profiles)) + for _, p := range binding.Profiles { + profileSet[p.Name] = struct{}{} + } + profiles := make([]string, 0, len(profileSet)) + for name := range profileSet { + profiles = append(profiles, name) + } + slices.Sort(profiles) // IMP-MAP-001: deterministic sorted order + + // IMP-MAP-005, IMP-MAP-006: build description. + description := fmt.Sprintf( + "Imported from CO ScanSettingBinding %s/%s (ScanSetting: %s)", + binding.Namespace, binding.Name, ss.Name, + ) + + return MappingResult{ + Payload: &models.ACSCreatePayload{ + ScanName: binding.Name, // IMP-MAP-001 + ScanConfig: models.ACSBaseScanConfig{ + OneTimeScan: false, // IMP-MAP-003 + Profiles: profiles, // IMP-MAP-001 + ScanSchedule: schedule, // IMP-MAP-004 + Description: description, // IMP-MAP-005, IMP-MAP-006 + }, + Clusters: []string{cfg.ACSClusterID}, // IMP-MAP-007 + }, + } +} diff --git a/scripts/compliance-operator-importer/internal/mapping/mapping_test.go b/scripts/compliance-operator-importer/internal/mapping/mapping_test.go new file mode 100644 index 0000000000000..a179cf6fa93ea --- /dev/null +++ b/scripts/compliance-operator-importer/internal/mapping/mapping_test.go @@ -0,0 +1,504 @@ +package mapping + +import ( + "encoding/json" + "strings" + "testing" + + "github.com/stackrox/co-acs-importer/internal/cofetch" + "github.com/stackrox/co-acs-importer/internal/models" +) + +// baseBinding returns a minimal valid ScanSettingBinding for tests. +func baseBinding() cofetch.ScanSettingBinding { + return cofetch.ScanSettingBinding{ + Namespace: "openshift-compliance", + Name: "cis-weekly", + ScanSettingName: "default-auto-apply", + Profiles: []cofetch.ProfileRef{ + {Name: "ocp4-cis-node", Kind: "Profile"}, + {Name: "ocp4-cis-master", Kind: "Profile"}, + {Name: "my-tailored-profile", Kind: "TailoredProfile"}, + }, + } +} + +// baseScanSetting returns a minimal valid ScanSetting for tests. +func baseScanSetting() *cofetch.ScanSetting { + return &cofetch.ScanSetting{ + Namespace: "openshift-compliance", + Name: "default-auto-apply", + Schedule: "0 0 * * *", + } +} + +// baseConfig returns a minimal Config for tests. +func baseConfig() *models.Config { + return &models.Config{ + ACSClusterID: "cluster-a", + } +} + +// TestIMP_MAP_001_ScanName verifies the ACS payload scanName equals the binding name. +func TestIMP_MAP_001_ScanName(t *testing.T) { + result := MapBinding(baseBinding(), baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + if result.Payload == nil { + t.Fatal("expected non-nil payload") + } + if result.Payload.ScanName != "cis-weekly" { + t.Errorf("ScanName: want %q, got %q", "cis-weekly", result.Payload.ScanName) + } +} + +// TestIMP_MAP_001_ProfilesSortedDeduped verifies profiles are sorted and deduplicated. +func TestIMP_MAP_001_ProfilesSortedDeduped(t *testing.T) { + binding := baseBinding() + // Add a duplicate entry. + binding.Profiles = append(binding.Profiles, cofetch.ProfileRef{Name: "ocp4-cis-node", Kind: "Profile"}) + + result := MapBinding(binding, baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + want := []string{"my-tailored-profile", "ocp4-cis-master", "ocp4-cis-node"} + got := result.Payload.ScanConfig.Profiles + if len(got) != len(want) { + t.Fatalf("Profiles len: want %d, got %d: %v", len(want), len(got), got) + } + for i, w := range want { + if got[i] != w { + t.Errorf("Profiles[%d]: want %q, got %q", i, w, got[i]) + } + } +} + +// TestIMP_MAP_002_MissingKindDefaultsToProfile verifies that a ProfileRef with empty +// Kind is accepted and the profile name is included in ACS profiles (IMP-MAP-002). +// The kind=="" semantics mean "treat as Profile" — no lookup difference for the importer. +func TestIMP_MAP_002_MissingKindDefaultsToProfile(t *testing.T) { + binding := baseBinding() + binding.Profiles = []cofetch.ProfileRef{ + {Name: "custom-x"}, // Kind is empty => defaults to "Profile" + } + + result := MapBinding(binding, baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + if len(result.Payload.ScanConfig.Profiles) != 1 { + t.Fatalf("expected 1 profile, got %v", result.Payload.ScanConfig.Profiles) + } + if result.Payload.ScanConfig.Profiles[0] != "custom-x" { + t.Errorf("profile name: want %q, got %q", "custom-x", result.Payload.ScanConfig.Profiles[0]) + } +} + +// TestIMP_MAP_003_OneTimeScanFalseWhenScheduleSet verifies oneTimeScan is false +// when the ScanSetting has a cron schedule. +func TestIMP_MAP_003_OneTimeScanFalseWhenScheduleSet(t *testing.T) { + result := MapBinding(baseBinding(), baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + if result.Payload.ScanConfig.OneTimeScan { + t.Error("OneTimeScan: want false when schedule is set") + } +} + +// TestIMP_MAP_004_ScanSchedulePresentWhenScheduleSet verifies scanSchedule is non-nil +// when the ScanSetting has a cron schedule. +func TestIMP_MAP_004_ScanSchedulePresentWhenScheduleSet(t *testing.T) { + result := MapBinding(baseBinding(), baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + if result.Payload.ScanConfig.ScanSchedule == nil { + t.Error("ScanSchedule: want non-nil when schedule is set") + } +} + +// TestIMP_MAP_005_DescriptionContainsBindingRef verifies the description contains +// "Imported from CO ScanSettingBinding /". +func TestIMP_MAP_005_DescriptionContainsBindingRef(t *testing.T) { + result := MapBinding(baseBinding(), baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + want := "Imported from CO ScanSettingBinding openshift-compliance/cis-weekly" + if !strings.Contains(result.Payload.ScanConfig.Description, want) { + t.Errorf("Description: want it to contain %q, got %q", want, result.Payload.ScanConfig.Description) + } +} + +// TestIMP_MAP_006_DescriptionIncludesScanSettingName verifies the description +// includes a reference to the ScanSetting name. +func TestIMP_MAP_006_DescriptionIncludesScanSettingName(t *testing.T) { + result := MapBinding(baseBinding(), baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + if !strings.Contains(result.Payload.ScanConfig.Description, "default-auto-apply") { + t.Errorf("Description: want ScanSetting name %q included, got %q", + "default-auto-apply", result.Payload.ScanConfig.Description) + } +} + +// TestIMP_MAP_007_ClustersContainsACSClusterID verifies clusters contains the +// configured ACS cluster ID. +func TestIMP_MAP_007_ClustersContainsACSClusterID(t *testing.T) { + result := MapBinding(baseBinding(), baseScanSetting(), baseConfig()) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + if len(result.Payload.Clusters) != 1 { + t.Fatalf("Clusters: want 1 entry, got %v", result.Payload.Clusters) + } + if result.Payload.Clusters[0] != "cluster-a" { + t.Errorf("Clusters[0]: want %q, got %q", "cluster-a", result.Payload.Clusters[0]) + } +} + +// TestIMP_MAP_008_MissingScanSettingSkipsBinding verifies that a nil ScanSetting +// results in a MappingResult with nil Payload and non-nil Problem (IMP-MAP-008). +func TestIMP_MAP_008_MissingScanSettingSkipsBinding(t *testing.T) { + result := MapBinding(baseBinding(), nil, baseConfig()) + if result.Payload != nil { + t.Errorf("Payload: want nil when ScanSetting is missing, got %+v", result.Payload) + } + if result.Problem == nil { + t.Fatal("Problem: want non-nil when ScanSetting is missing") + } +} + +// TestIMP_MAP_008_MissingScanSettingCategoryMapping verifies the problem category +// is "mapping" for a missing ScanSetting (IMP-MAP-008). +func TestIMP_MAP_008_MissingScanSettingCategoryMapping(t *testing.T) { + result := MapBinding(baseBinding(), nil, baseConfig()) + if result.Problem == nil { + t.Fatal("Problem: want non-nil") + } + if result.Problem.Category != models.CategoryMapping { + t.Errorf("Problem.Category: want %q, got %q", models.CategoryMapping, result.Problem.Category) + } +} + +// TestIMP_MAP_009_MissingScanSettingProblemsEntry verifies the problem entry has +// a populated ResourceRef (IMP-MAP-009). +func TestIMP_MAP_009_MissingScanSettingProblemsEntry(t *testing.T) { + result := MapBinding(baseBinding(), nil, baseConfig()) + if result.Problem == nil { + t.Fatal("Problem: want non-nil") + } + if result.Problem.ResourceRef == "" { + t.Error("Problem.ResourceRef: want non-empty") + } +} + +// TestIMP_MAP_010_MissingScanSettingFixHint verifies the problem entry has a +// non-empty fix hint (IMP-MAP-010). +func TestIMP_MAP_010_MissingScanSettingFixHint(t *testing.T) { + result := MapBinding(baseBinding(), nil, baseConfig()) + if result.Problem == nil { + t.Fatal("Problem: want non-nil") + } + if result.Problem.FixHint == "" { + t.Error("Problem.FixHint: want non-empty fix hint for missing ScanSetting") + } +} + +// TestIMP_MAP_011_OtherValidBindingsStillProcessed verifies that a missing ScanSetting +// only affects that binding; independent MapBinding calls for valid bindings succeed (IMP-MAP-011). +func TestIMP_MAP_011_OtherValidBindingsStillProcessed(t *testing.T) { + // Broken binding (nil ScanSetting). + broken := MapBinding(baseBinding(), nil, baseConfig()) + if broken.Problem == nil { + t.Fatal("broken binding: want Problem set") + } + + // Valid binding processed independently and must succeed. + validBinding := cofetch.ScanSettingBinding{ + Namespace: "openshift-compliance", + Name: "another-binding", + ScanSettingName: "default-auto-apply", + Profiles: []cofetch.ProfileRef{{Name: "ocp4-cis", Kind: "Profile"}}, + } + valid := MapBinding(validBinding, baseScanSetting(), baseConfig()) + if valid.Problem != nil { + t.Fatalf("valid binding: unexpected problem: %+v", valid.Problem) + } + if valid.Payload == nil { + t.Fatal("valid binding: expected non-nil payload") + } +} + +// TestIMP_MAP_012_InvalidCronSkipsBinding verifies that an invalid cron expression +// causes the binding to be skipped (Payload=nil, Problem set, Skipped=true) (IMP-MAP-012). +func TestIMP_MAP_012_InvalidCronSkipsBinding(t *testing.T) { + ss := &cofetch.ScanSetting{ + Namespace: "openshift-compliance", + Name: "bad-schedule", + Schedule: "every day at noon", + } + result := MapBinding(baseBinding(), ss, baseConfig()) + if result.Payload != nil { + t.Errorf("Payload: want nil for invalid cron, got %+v", result.Payload) + } + if result.Problem == nil { + t.Fatal("Problem: want non-nil for invalid cron") + } + if !result.Problem.Skipped { + t.Error("Problem.Skipped: want true") + } +} + +// TestIMP_MAP_013_InvalidCronProblemCategoryMapping verifies the problem category +// is "mapping" for an invalid schedule (IMP-MAP-013). +func TestIMP_MAP_013_InvalidCronProblemCategoryMapping(t *testing.T) { + ss := &cofetch.ScanSetting{ + Namespace: "openshift-compliance", + Name: "bad-schedule", + Schedule: "every day at noon", + } + result := MapBinding(baseBinding(), ss, baseConfig()) + if result.Problem == nil { + t.Fatal("Problem: want non-nil") + } + if result.Problem.Category != models.CategoryMapping { + t.Errorf("Problem.Category: want %q, got %q", models.CategoryMapping, result.Problem.Category) + } +} + +// TestIMP_MAP_014_InvalidCronDescriptionMentionsSchedule verifies the problem +// description mentions schedule conversion failure (IMP-MAP-014). +func TestIMP_MAP_014_InvalidCronDescriptionMentionsSchedule(t *testing.T) { + ss := &cofetch.ScanSetting{ + Namespace: "openshift-compliance", + Name: "bad-schedule", + Schedule: "every day at noon", + } + result := MapBinding(baseBinding(), ss, baseConfig()) + if result.Problem == nil { + t.Fatal("Problem: want non-nil") + } + desc := strings.ToLower(result.Problem.Description) + if !strings.Contains(desc, "schedule") { + t.Errorf("Problem.Description: want it to mention %q, got %q", "schedule", result.Problem.Description) + } +} + +// TestIMP_MAP_015_InvalidCronFixHintMentionsCron verifies the problem fix hint +// suggests using a valid cron expression (IMP-MAP-015). +func TestIMP_MAP_015_InvalidCronFixHintMentionsCron(t *testing.T) { + ss := &cofetch.ScanSetting{ + Namespace: "openshift-compliance", + Name: "bad-schedule", + Schedule: "every day at noon", + } + result := MapBinding(baseBinding(), ss, baseConfig()) + if result.Problem == nil { + t.Fatal("Problem: want non-nil") + } + hint := strings.ToLower(result.Problem.FixHint) + if !strings.Contains(hint, "cron") { + t.Errorf("Problem.FixHint: want it to mention %q, got %q", "cron", result.Problem.FixHint) + } +} + +// ─── Wire-format tests (IMP-MAP-004a..d) ───────────────────────────────────── +// +// These tests serialize the ACS payload to JSON and verify that field names +// match the proto/api/v2 schema. They would have caught the Weekly vs DaysOfWeek +// bug: the ACS API proto has "daysOfWeek" but no "weekly" field, so a JSON +// containing "weekly" would be silently ignored by the gRPC gateway. + +// allowedScheduleKeys are the JSON keys allowed in a serialized ACSSchedule, +// matching proto/api/v2/common.proto message Schedule. +var allowedScheduleKeys = map[string]bool{ + "intervalType": true, + "hour": true, + "minute": true, + "daysOfWeek": true, + "daysOfMonth": true, +} + +// allowedPayloadKeys are the top-level JSON keys allowed in a serialized +// ACSCreatePayload, matching proto ComplianceScanConfiguration. +var allowedPayloadKeys = map[string]bool{ + "scanName": true, + "scanConfig": true, + "clusters": true, +} + +// allowedScanConfigKeys are the JSON keys allowed in a serialized +// ACSBaseScanConfig, matching proto BaseComplianceScanConfigurationSettings. +var allowedScanConfigKeys = map[string]bool{ + "oneTimeScan": true, + "profiles": true, + "scanSchedule": true, + "description": true, +} + +// TestIMP_MAP_004a_PayloadWireFormat_AllScheduleTypes verifies that the full +// ACS payload serializes to JSON with only proto-valid field names for each +// schedule type: DAILY, WEEKLY, MONTHLY. +func TestIMP_MAP_004a_PayloadWireFormat_AllScheduleTypes(t *testing.T) { + cases := []struct { + name string + cron string + wantInterval string + wantDOW bool // expect daysOfWeek present + wantDOM bool // expect daysOfMonth present + }{ + {name: "DAILY", cron: "0 2 * * *", wantInterval: "DAILY"}, + {name: "WEEKLY_Sunday", cron: "0 2 * * 0", wantInterval: "WEEKLY", wantDOW: true}, + {name: "WEEKLY_Friday", cron: "30 14 * * 5", wantInterval: "WEEKLY", wantDOW: true}, + {name: "MONTHLY_1st", cron: "0 2 1 * *", wantInterval: "MONTHLY", wantDOM: true}, + {name: "MONTHLY_15th", cron: "0 6 15 * *", wantInterval: "MONTHLY", wantDOM: true}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + binding := cofetch.ScanSettingBinding{ + Namespace: "ns", + Name: "b", + ScanSettingName: "ss", + Profiles: []cofetch.ProfileRef{{Name: "ocp4-cis", Kind: "Profile"}}, + } + ss := &cofetch.ScanSetting{Namespace: "ns", Name: "ss", Schedule: tc.cron} + cfg := &models.Config{ACSClusterID: "cluster-1"} + + result := MapBinding(binding, ss, cfg) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + + // Serialize to JSON — this is the wire format sent to the ACS API. + data, err := json.Marshal(result.Payload) + if err != nil { + t.Fatalf("json.Marshal: %v", err) + } + + // Parse back to a generic map to inspect field names. + var raw map[string]json.RawMessage + if err := json.Unmarshal(data, &raw); err != nil { + t.Fatalf("unmarshal payload: %v", err) + } + + // IMP-MAP-004d: top-level payload keys must match proto. + for key := range raw { + if !allowedPayloadKeys[key] { + t.Errorf("payload contains unexpected JSON key %q (not in ComplianceScanConfiguration proto)", key) + } + } + + // Parse scanConfig. + var scanConfig map[string]json.RawMessage + if err := json.Unmarshal(raw["scanConfig"], &scanConfig); err != nil { + t.Fatalf("unmarshal scanConfig: %v", err) + } + for key := range scanConfig { + if !allowedScanConfigKeys[key] { + t.Errorf("scanConfig contains unexpected JSON key %q (not in BaseComplianceScanConfigurationSettings proto)", key) + } + } + + // Parse scanSchedule. + schedRaw, ok := scanConfig["scanSchedule"] + if !ok { + t.Fatal("scanSchedule missing from JSON") + } + var sched map[string]json.RawMessage + if err := json.Unmarshal(schedRaw, &sched); err != nil { + t.Fatalf("unmarshal scanSchedule: %v", err) + } + + // IMP-MAP-004a: schedule keys must only be proto-valid. + for key := range sched { + if !allowedScheduleKeys[key] { + t.Errorf("scanSchedule contains unexpected JSON key %q (not in Schedule proto; would be silently ignored by gRPC gateway)", key) + } + } + + // Verify intervalType. + var intervalType string + if err := json.Unmarshal(sched["intervalType"], &intervalType); err != nil { + t.Fatalf("unmarshal intervalType: %v", err) + } + if intervalType != tc.wantInterval { + t.Errorf("intervalType: want %q, got %q", tc.wantInterval, intervalType) + } + + // IMP-MAP-004b: WEEKLY must have daysOfWeek. + if tc.wantDOW { + if _, ok := sched["daysOfWeek"]; !ok { + t.Error("WEEKLY schedule missing daysOfWeek in JSON (API would have no day-of-week info)") + } + } + + // IMP-MAP-004c: MONTHLY must have daysOfMonth. + if tc.wantDOM { + if _, ok := sched["daysOfMonth"]; !ok { + t.Error("MONTHLY schedule missing daysOfMonth in JSON (API would have no day-of-month info)") + } + } + + // DAILY should NOT have daysOfWeek or daysOfMonth. + if !tc.wantDOW && !tc.wantDOM { + if _, ok := sched["daysOfWeek"]; ok { + t.Error("DAILY schedule should not have daysOfWeek in JSON") + } + if _, ok := sched["daysOfMonth"]; ok { + t.Error("DAILY schedule should not have daysOfMonth in JSON") + } + } + }) + } +} + +// TestIMP_MAP_004b_WeeklyDaysOfWeekValue verifies the daysOfWeek.days array +// contains the correct day-of-week integer for weekly schedules. +func TestIMP_MAP_004b_WeeklyDaysOfWeekValue(t *testing.T) { + cases := []struct { + cron string + wantDay int32 + }{ + {"0 0 * * 0", 0}, // Sunday + {"0 0 * * 1", 1}, // Monday + {"0 0 * * 6", 6}, // Saturday + } + + for _, tc := range cases { + t.Run(string(rune('0'+tc.wantDay)), func(t *testing.T) { + ss := &cofetch.ScanSetting{Namespace: "ns", Name: "s", Schedule: tc.cron} + binding := cofetch.ScanSettingBinding{ + Namespace: "ns", Name: "b", ScanSettingName: "s", + Profiles: []cofetch.ProfileRef{{Name: "p", Kind: "Profile"}}, + } + result := MapBinding(binding, ss, &models.Config{ACSClusterID: "c"}) + if result.Problem != nil { + t.Fatalf("unexpected problem: %+v", result.Problem) + } + + data, _ := json.Marshal(result.Payload) + var parsed struct { + ScanConfig struct { + ScanSchedule struct { + DaysOfWeek struct { + Days []int32 `json:"days"` + } `json:"daysOfWeek"` + } `json:"scanSchedule"` + } `json:"scanConfig"` + } + if err := json.Unmarshal(data, &parsed); err != nil { + t.Fatalf("unmarshal: %v", err) + } + days := parsed.ScanConfig.ScanSchedule.DaysOfWeek.Days + if len(days) != 1 || days[0] != tc.wantDay { + t.Errorf("daysOfWeek.days: want [%d], got %v", tc.wantDay, days) + } + }) + } +} diff --git a/scripts/compliance-operator-importer/internal/mapping/schedule.go b/scripts/compliance-operator-importer/internal/mapping/schedule.go new file mode 100644 index 0000000000000..004a07939f7d5 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/mapping/schedule.go @@ -0,0 +1,126 @@ +package mapping + +import ( + "errors" + "fmt" + "strconv" + "strings" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// ConvertCronToACSSchedule converts a standard 5-field cron expression to an +// ACS Schedule object. +// +// Supported cases: +// +// "minute hour * * *" -> DAILY, hour=H, minute=M +// "minute hour * * dayOfWeek" -> WEEKLY, hour=H, minute=M, day=DOW +// "minute hour dayOfMonth * *" -> MONTHLY, hour=H, minute=M, days=[DOM] +// +// Returns an error for: +// - non-5-field expressions +// - step notation (*/n or n/m) +// - range notation (n-m) +// - both day-of-month and day-of-week set (ambiguous) +// - out-of-range values +// - any other unsupported syntax +// +// The error message is suitable for inclusion in a Problem.FixHint. +func ConvertCronToACSSchedule(cron string) (*models.ACSSchedule, error) { + cron = strings.TrimSpace(cron) + if cron == "" { + return nil, errors.New("cron expression is empty; provide a valid 5-field cron expression (e.g. \"0 2 * * *\" for daily at 02:00)") + } + + fields := strings.Fields(cron) + if len(fields) != 5 { + return nil, fmt.Errorf("cron expression %q has %d field(s); a standard cron requires exactly 5 fields: minute hour day-of-month month day-of-week", cron, len(fields)) + } + + minute, hour, dom, month, dow := fields[0], fields[1], fields[2], fields[3], fields[4] + + // Reject unsupported syntax in any field. + for _, f := range fields { + if strings.Contains(f, "/") { + return nil, fmt.Errorf("step notation %q is not supported; use a simple numeric cron expression (e.g. \"0 2 * * *\")", f) + } + if strings.Contains(f, "-") { + return nil, fmt.Errorf("range notation %q is not supported; use a simple numeric cron expression (e.g. \"0 2 * * *\")", f) + } + } + + // Month must always be wildcard; we don't support specific-month scheduling. + if month != "*" { + return nil, fmt.Errorf("specific month field %q is not supported; set month to \"*\" and use day-of-month or day-of-week instead", month) + } + + // Parse minute. + minVal, err := parseField(minute, "minute", 0, 59) + if err != nil { + return nil, err + } + + // Parse hour. + hourVal, err := parseField(hour, "hour", 0, 23) + if err != nil { + return nil, err + } + + // Determine schedule type by which positional fields are wildcards. + domIsWild := dom == "*" + dowIsWild := dow == "*" + + switch { + case !domIsWild && !dowIsWild: + // Both set — ambiguous. + return nil, fmt.Errorf("cron expression %q sets both day-of-month (%s) and day-of-week (%s), which is ambiguous; set exactly one to \"*\"", cron, dom, dow) + + case domIsWild && dowIsWild: + // DAILY: "minute hour * * *" + return &models.ACSSchedule{ + IntervalType: "DAILY", + Hour: hourVal, + Minute: minVal, + }, nil + + case domIsWild && !dowIsWild: + // WEEKLY: "minute hour * * dayOfWeek" + dowVal, err := parseField(dow, "day-of-week", 0, 6) + if err != nil { + return nil, err + } + return &models.ACSSchedule{ + IntervalType: "WEEKLY", + Hour: hourVal, + Minute: minVal, + DaysOfWeek: &models.ACSDaysOfWeek{Days: []int32{dowVal}}, + }, nil + + default: + // MONTHLY: "minute hour dayOfMonth * *" + domVal, err := parseField(dom, "day-of-month", 1, 31) + if err != nil { + return nil, err + } + return &models.ACSSchedule{ + IntervalType: "MONTHLY", + Hour: hourVal, + Minute: minVal, + DaysOfMonth: &models.ACSDaysOfMonth{Days: []int32{domVal}}, + }, nil + } +} + +// parseField parses a single cron field that must be a plain integer (no wildcards +// allowed at this point) within [min, max]. +func parseField(val, name string, min, max int) (int32, error) { + n, err := strconv.Atoi(val) + if err != nil { + return 0, fmt.Errorf("cron field %q (value %q) is not a valid integer; use a plain number or \"*\" for %s", name, val, name) + } + if n < min || n > max { + return 0, fmt.Errorf("cron field %q value %d is out of range [%d, %d]", name, n, min, max) + } + return int32(n), nil +} diff --git a/scripts/compliance-operator-importer/internal/mapping/schedule_test.go b/scripts/compliance-operator-importer/internal/mapping/schedule_test.go new file mode 100644 index 0000000000000..462a1bc4890bd --- /dev/null +++ b/scripts/compliance-operator-importer/internal/mapping/schedule_test.go @@ -0,0 +1,225 @@ +package mapping + +import ( + "testing" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// TestIMP_MAP_003_IMP_MAP_004_DailySchedule verifies that a daily cron expression +// produces oneTimeScan=false (IMP-MAP-003) and a present DAILY schedule (IMP-MAP-004). +func TestIMP_MAP_003_IMP_MAP_004_DailySchedule(t *testing.T) { + got, err := ConvertCronToACSSchedule("0 0 * * *") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got == nil { + t.Fatal("expected non-nil schedule") + } + if got.IntervalType != "DAILY" { + t.Errorf("IntervalType: want DAILY, got %q", got.IntervalType) + } + if got.Hour != 0 { + t.Errorf("Hour: want 0, got %d", got.Hour) + } + if got.Minute != 0 { + t.Errorf("Minute: want 0, got %d", got.Minute) + } + if got.DaysOfWeek != nil { + t.Errorf("DaysOfWeek: want nil for DAILY, got %+v", got.DaysOfWeek) + } + if got.DaysOfMonth != nil { + t.Errorf("DaysOfMonth: want nil for DAILY, got %+v", got.DaysOfMonth) + } +} + +// TestIMP_MAP_003_IMP_MAP_004_DailyScheduleNonMidnight verifies non-midnight daily. +func TestIMP_MAP_003_IMP_MAP_004_DailyScheduleNonMidnight(t *testing.T) { + got, err := ConvertCronToACSSchedule("30 14 * * *") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got.IntervalType != "DAILY" { + t.Errorf("IntervalType: want DAILY, got %q", got.IntervalType) + } + if got.Hour != 14 { + t.Errorf("Hour: want 14, got %d", got.Hour) + } + if got.Minute != 30 { + t.Errorf("Minute: want 30, got %d", got.Minute) + } +} + +// TestIMP_MAP_003_IMP_MAP_004_WeeklySchedule verifies that a weekly cron expression +// produces a WEEKLY schedule with the correct day (IMP-MAP-003, IMP-MAP-004). +func TestIMP_MAP_003_IMP_MAP_004_WeeklySchedule(t *testing.T) { + // "0 2 * * 0" means Sunday at 02:00 + got, err := ConvertCronToACSSchedule("0 2 * * 0") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got.IntervalType != "WEEKLY" { + t.Errorf("IntervalType: want WEEKLY, got %q", got.IntervalType) + } + if got.Hour != 2 { + t.Errorf("Hour: want 2, got %d", got.Hour) + } + if got.Minute != 0 { + t.Errorf("Minute: want 0, got %d", got.Minute) + } + if got.DaysOfWeek == nil { + t.Fatal("DaysOfWeek: want non-nil for WEEKLY schedule") + } + if len(got.DaysOfWeek.Days) != 1 || got.DaysOfWeek.Days[0] != 0 { + t.Errorf("DaysOfWeek.Days: want [0] (Sunday), got %v", got.DaysOfWeek.Days) + } +} + +// TestIMP_MAP_003_IMP_MAP_004_WeeklyScheduleSaturday verifies Saturday weekly. +func TestIMP_MAP_003_IMP_MAP_004_WeeklyScheduleSaturday(t *testing.T) { + got, err := ConvertCronToACSSchedule("15 3 * * 6") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got.IntervalType != "WEEKLY" { + t.Errorf("IntervalType: want WEEKLY, got %q", got.IntervalType) + } + if got.DaysOfWeek == nil { + t.Fatal("DaysOfWeek: want non-nil for WEEKLY schedule") + } + if len(got.DaysOfWeek.Days) != 1 || got.DaysOfWeek.Days[0] != 6 { + t.Errorf("DaysOfWeek.Days: want [6] (Saturday), got %v", got.DaysOfWeek.Days) + } +} + +// TestIMP_MAP_003_IMP_MAP_004_MonthlySchedule verifies that a monthly cron expression +// produces a MONTHLY schedule with the correct day-of-month (IMP-MAP-003, IMP-MAP-004). +func TestIMP_MAP_003_IMP_MAP_004_MonthlySchedule(t *testing.T) { + // "30 6 1 * *" means 1st of every month at 06:30 + got, err := ConvertCronToACSSchedule("30 6 1 * *") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got.IntervalType != "MONTHLY" { + t.Errorf("IntervalType: want MONTHLY, got %q", got.IntervalType) + } + if got.Hour != 6 { + t.Errorf("Hour: want 6, got %d", got.Hour) + } + if got.Minute != 30 { + t.Errorf("Minute: want 30, got %d", got.Minute) + } + if got.DaysOfMonth == nil { + t.Fatal("DaysOfMonth: want non-nil for MONTHLY schedule") + } + if len(got.DaysOfMonth.Days) != 1 || got.DaysOfMonth.Days[0] != 1 { + t.Errorf("DaysOfMonth.Days: want [1], got %v", got.DaysOfMonth.Days) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronNaturalLanguage verifies that a human-readable +// schedule string is rejected with an error that mentions cron (IMP-MAP-012, IMP-MAP-015). +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronNaturalLanguage(t *testing.T) { + got, err := ConvertCronToACSSchedule("every day at noon") + if err == nil { + t.Fatalf("expected error for natural-language expression, got %+v", got) + } + errStr := err.Error() + if len(errStr) == 0 { + t.Error("error message must not be empty") + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronStepNotation verifies that step notation +// (*/n) is rejected as unsupported (IMP-MAP-012, IMP-MAP-015). +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronStepNotation(t *testing.T) { + got, err := ConvertCronToACSSchedule("*/6 * * * *") + if err == nil { + t.Fatalf("expected error for step notation, got %+v", got) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronRange verifies that range notation (n-m) +// is rejected (IMP-MAP-012, IMP-MAP-015). +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronRange(t *testing.T) { + got, err := ConvertCronToACSSchedule("0 0 * * 1-5") + if err == nil { + t.Fatalf("expected error for range notation, got %+v", got) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronEmpty verifies that an empty string +// is rejected (IMP-MAP-012, IMP-MAP-015). +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronEmpty(t *testing.T) { + got, err := ConvertCronToACSSchedule("") + if err == nil { + t.Fatalf("expected error for empty cron, got %+v", got) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronTooFewFields verifies that a cron with +// fewer than 5 fields is rejected. +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronTooFewFields(t *testing.T) { + got, err := ConvertCronToACSSchedule("0 0 * *") + if err == nil { + t.Fatalf("expected error for 4-field cron, got %+v", got) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronTooManyFields verifies that a cron with +// more than 5 fields is rejected. +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronTooManyFields(t *testing.T) { + got, err := ConvertCronToACSSchedule("0 0 * * * *") + if err == nil { + t.Fatalf("expected error for 6-field cron, got %+v", got) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronBothDOMAndDOW verifies that a cron +// with both day-of-month and day-of-week set is rejected as ambiguous. +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronBothDOMAndDOW(t *testing.T) { + got, err := ConvertCronToACSSchedule("0 0 1 * 0") + if err == nil { + t.Fatalf("expected error for both DOM and DOW set, got %+v", got) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronOutOfRangeHour verifies out-of-range hour. +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronOutOfRangeHour(t *testing.T) { + got, err := ConvertCronToACSSchedule("0 25 * * *") + if err == nil { + t.Fatalf("expected error for hour=25, got %+v", got) + } +} + +// TestIMP_MAP_012_IMP_MAP_015_InvalidCronOutOfRangeMinute verifies out-of-range minute. +func TestIMP_MAP_012_IMP_MAP_015_InvalidCronOutOfRangeMinute(t *testing.T) { + got, err := ConvertCronToACSSchedule("60 0 * * *") + if err == nil { + t.Fatalf("expected error for minute=60, got %+v", got) + } +} + +// TestIMP_MAP_003_IMP_MAP_004_MultiValueDOMMonthly verifies that multiple days-of-month +// in a monthly cron are accepted (e.g. "0 0 1,15 * *"). +func TestIMP_MAP_003_IMP_MAP_004_MultiValueDOMMonthly(t *testing.T) { + got, err := ConvertCronToACSSchedule("0 0 15 * *") + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if got.IntervalType != "MONTHLY" { + t.Errorf("IntervalType: want MONTHLY, got %q", got.IntervalType) + } + if got.DaysOfMonth == nil || len(got.DaysOfMonth.Days) == 0 { + t.Fatal("DaysOfMonth: want non-nil with days") + } + if got.DaysOfMonth.Days[0] != 15 { + t.Errorf("DaysOfMonth.Days[0]: want 15, got %d", got.DaysOfMonth.Days[0]) + } +} + +// Compile-time check that the return type matches models.ACSSchedule. +var _ *models.ACSSchedule = func() *models.ACSSchedule { + s, _ := ConvertCronToACSSchedule("0 0 * * *") + return s +}() diff --git a/scripts/compliance-operator-importer/internal/merge/merge.go b/scripts/compliance-operator-importer/internal/merge/merge.go new file mode 100644 index 0000000000000..f58dc17fedbf2 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/merge/merge.go @@ -0,0 +1,191 @@ +// Package merge handles merging of ScanSettingBindings across multiple clusters. +package merge + +import ( + "fmt" + "slices" + "strings" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// MappedSSB represents a ScanSettingBinding that has been mapped to an ACS payload. +type MappedSSB struct { + Name string // SSB name + Profiles []string + Payload models.ACSCreatePayload +} + +// MergeResult holds the output of the merge operation. +type MergeResult struct { + Merged []MappedSSB + Problems []models.Problem +} + +// MergeSSBs merges ScanSettingBindings from multiple clusters. +// +// Input: map of clusterID → []MappedSSB +// Output: []MergedSSB (one per unique SSB name, with merged cluster IDs) +// +// Logic (IMP-MAP-019, IMP-MAP-020, IMP-MAP-021): +// - Group by SSB name across all clusters. +// - For each group: +// - If all SSBs have identical profiles (sorted) and identical schedule: merge into one, union cluster IDs. +// - If profiles or schedule differ: error for that SSB name, add problem entry. +func MergeSSBs(clusterSSBs map[string][]MappedSSB) MergeResult { + // Group SSBs by name. + groups := make(map[string][]clusterSSBEntry) + for clusterID, ssbs := range clusterSSBs { + for _, ssb := range ssbs { + groups[ssb.Name] = append(groups[ssb.Name], clusterSSBEntry{ + clusterID: clusterID, + ssb: ssb, + }) + } + } + + var merged []MappedSSB + var problems []models.Problem + + for ssbName, entries := range groups { + if len(entries) == 1 { + // Only one cluster has this SSB; no merging needed. + merged = append(merged, entries[0].ssb) + continue + } + + // Check if all SSBs in the group are identical (same profiles and schedule). + first := entries[0].ssb + identical := true + var conflictClusters []string + + for _, entry := range entries[1:] { + if !ssbsAreIdentical(first, entry.ssb) { + identical = false + conflictClusters = append(conflictClusters, entry.clusterID) + } + } + + if !identical { + // IMP-MAP-020: profiles or schedule differ. + conflictClusters = append([]string{entries[0].clusterID}, conflictClusters...) + problems = append(problems, models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryConflict, + ResourceRef: ssbName, + Description: fmt.Sprintf( + "ScanSettingBinding %q exists in multiple clusters with different profiles or schedules: %s", + ssbName, strings.Join(conflictClusters, ", "), + ), + FixHint: "Ensure SSBs with the same name have identical profiles and schedules across all clusters, or rename them uniquely per cluster.", + Skipped: true, + }) + continue + } + + // IMP-MAP-019, IMP-MAP-021: merge clusters. + mergedSSB := first + var allClusters []string + for _, entry := range entries { + allClusters = append(allClusters, entry.clusterID) + } + slices.Sort(allClusters) + mergedSSB.Payload.Clusters = allClusters + merged = append(merged, mergedSSB) + } + + return MergeResult{ + Merged: merged, + Problems: problems, + } +} + +// clusterSSBEntry pairs a cluster ID with an SSB. +type clusterSSBEntry struct { + clusterID string + ssb MappedSSB +} + +// ssbsAreIdentical checks if two SSBs have the same profiles and schedule. +func ssbsAreIdentical(a, b MappedSSB) bool { + // Compare sorted profiles. + aProfiles := make([]string, len(a.Profiles)) + bProfiles := make([]string, len(b.Profiles)) + copy(aProfiles, a.Profiles) + copy(bProfiles, b.Profiles) + slices.Sort(aProfiles) + slices.Sort(bProfiles) + + if !stringSlicesEqual(aProfiles, bProfiles) { + return false + } + + // Compare schedules. + return schedulesEqual(a.Payload.ScanConfig.ScanSchedule, b.Payload.ScanConfig.ScanSchedule) +} + +// stringSlicesEqual checks if two string slices are equal. +func stringSlicesEqual(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} + +// schedulesEqual checks if two ACS schedules are equal. +func schedulesEqual(a, b *models.ACSSchedule) bool { + if a == nil && b == nil { + return true + } + if a == nil || b == nil { + return false + } + + if a.Hour != b.Hour || a.Minute != b.Minute { + return false + } + + if a.IntervalType != b.IntervalType { + return false + } + + // Compare DaysOfWeek. + if (a.DaysOfWeek == nil) != (b.DaysOfWeek == nil) { + return false + } + if a.DaysOfWeek != nil && b.DaysOfWeek != nil { + if !int32SlicesEqual(a.DaysOfWeek.Days, b.DaysOfWeek.Days) { + return false + } + } + + // Compare DaysOfMonth. + if (a.DaysOfMonth == nil) != (b.DaysOfMonth == nil) { + return false + } + if a.DaysOfMonth != nil && b.DaysOfMonth != nil { + if !int32SlicesEqual(a.DaysOfMonth.Days, b.DaysOfMonth.Days) { + return false + } + } + + return true +} + +// int32SlicesEqual checks if two int32 slices are equal. +func int32SlicesEqual(a, b []int32) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} diff --git a/scripts/compliance-operator-importer/internal/merge/merge_test.go b/scripts/compliance-operator-importer/internal/merge/merge_test.go new file mode 100644 index 0000000000000..4f1085fbc9632 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/merge/merge_test.go @@ -0,0 +1,295 @@ +package merge + +import ( + "slices" + "testing" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// TestIMP_MAP_019_MergeSameProfilesSameSchedule verifies that SSBs with the +// same name, same profiles, and same schedule are merged across clusters. +func TestIMP_MAP_019_MergeSameProfilesSameSchedule(t *testing.T) { + input := map[string][]MappedSSB{ + "cluster-1": { + { + Name: "cis-benchmark", + Profiles: []string{"ocp4-cis", "ocp4-cis-node"}, + Payload: models.ACSCreatePayload{ + ScanName: "cis-benchmark", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"ocp4-cis", "ocp4-cis-node"}, + ScanSchedule: &models.ACSSchedule{ + Hour: 2, + Minute: 30, + }, + }, + Clusters: []string{"cluster-1"}, + }, + }, + }, + "cluster-2": { + { + Name: "cis-benchmark", + Profiles: []string{"ocp4-cis", "ocp4-cis-node"}, + Payload: models.ACSCreatePayload{ + ScanName: "cis-benchmark", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"ocp4-cis", "ocp4-cis-node"}, + ScanSchedule: &models.ACSSchedule{ + Hour: 2, + Minute: 30, + }, + }, + Clusters: []string{"cluster-2"}, + }, + }, + }, + } + + result := MergeSSBs(input) + if len(result.Merged) != 1 { + t.Fatalf("expected 1 merged SSB, got %d", len(result.Merged)) + } + + merged := result.Merged[0] + if merged.Name != "cis-benchmark" { + t.Errorf("expected SSB name 'cis-benchmark', got %q", merged.Name) + } + + // Clusters should be merged. + slices.Sort(merged.Payload.Clusters) + expected := []string{"cluster-1", "cluster-2"} + if !stringSlicesEqual(merged.Payload.Clusters, expected) { + t.Errorf("expected clusters %v, got %v", expected, merged.Payload.Clusters) + } + + if len(result.Problems) != 0 { + t.Errorf("expected no problems, got %d", len(result.Problems)) + } +} + +// TestIMP_MAP_021_MergeIdenticalSSBsUnion verifies that identical SSBs are +// merged with a union of cluster IDs. +func TestIMP_MAP_021_MergeIdenticalSSBsUnion(t *testing.T) { + input := map[string][]MappedSSB{ + "cluster-a": { + { + Name: "ssb-1", + Profiles: []string{"profile-x"}, + Payload: models.ACSCreatePayload{ + ScanName: "ssb-1", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"profile-x"}, + ScanSchedule: &models.ACSSchedule{ + Hour: 10, + Minute: 0, + }, + }, + Clusters: []string{"cluster-a"}, + }, + }, + }, + "cluster-b": { + { + Name: "ssb-1", + Profiles: []string{"profile-x"}, + Payload: models.ACSCreatePayload{ + ScanName: "ssb-1", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"profile-x"}, + ScanSchedule: &models.ACSSchedule{ + Hour: 10, + Minute: 0, + }, + }, + Clusters: []string{"cluster-b"}, + }, + }, + }, + "cluster-c": { + { + Name: "ssb-1", + Profiles: []string{"profile-x"}, + Payload: models.ACSCreatePayload{ + ScanName: "ssb-1", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"profile-x"}, + ScanSchedule: &models.ACSSchedule{ + Hour: 10, + Minute: 0, + }, + }, + Clusters: []string{"cluster-c"}, + }, + }, + }, + } + + result := MergeSSBs(input) + if len(result.Merged) != 1 { + t.Fatalf("expected 1 merged SSB, got %d", len(result.Merged)) + } + + merged := result.Merged[0] + slices.Sort(merged.Payload.Clusters) + expected := []string{"cluster-a", "cluster-b", "cluster-c"} + if !stringSlicesEqual(merged.Payload.Clusters, expected) { + t.Errorf("expected clusters %v, got %v", expected, merged.Payload.Clusters) + } +} + +// TestIMP_MAP_020_DifferentProfilesError verifies that SSBs with the same name +// but different profiles produce an error. +func TestIMP_MAP_020_DifferentProfilesError(t *testing.T) { + input := map[string][]MappedSSB{ + "cluster-1": { + { + Name: "ssb-conflict", + Profiles: []string{"profile-a"}, + Payload: models.ACSCreatePayload{ + ScanName: "ssb-conflict", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"profile-a"}, + }, + Clusters: []string{"cluster-1"}, + }, + }, + }, + "cluster-2": { + { + Name: "ssb-conflict", + Profiles: []string{"profile-b"}, + Payload: models.ACSCreatePayload{ + ScanName: "ssb-conflict", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"profile-b"}, + }, + Clusters: []string{"cluster-2"}, + }, + }, + }, + } + + result := MergeSSBs(input) + if len(result.Merged) != 0 { + t.Errorf("expected no merged SSBs when profiles differ, got %d", len(result.Merged)) + } + if len(result.Problems) != 1 { + t.Fatalf("expected 1 problem, got %d", len(result.Problems)) + } + + problem := result.Problems[0] + if problem.Severity != models.SeverityError { + t.Errorf("expected error severity, got %v", problem.Severity) + } + if problem.Category != models.CategoryConflict { + t.Errorf("expected conflict category, got %v", problem.Category) + } +} + +// TestIMP_MAP_020_DifferentScheduleError verifies that SSBs with the same name +// and profiles but different schedules produce an error. +func TestIMP_MAP_020_DifferentScheduleError(t *testing.T) { + input := map[string][]MappedSSB{ + "cluster-1": { + { + Name: "ssb-sched-conflict", + Profiles: []string{"profile-x"}, + Payload: models.ACSCreatePayload{ + ScanName: "ssb-sched-conflict", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"profile-x"}, + ScanSchedule: &models.ACSSchedule{ + Hour: 10, + Minute: 0, + }, + }, + Clusters: []string{"cluster-1"}, + }, + }, + }, + "cluster-2": { + { + Name: "ssb-sched-conflict", + Profiles: []string{"profile-x"}, + Payload: models.ACSCreatePayload{ + ScanName: "ssb-sched-conflict", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"profile-x"}, + ScanSchedule: &models.ACSSchedule{ + Hour: 14, + Minute: 30, + }, + }, + Clusters: []string{"cluster-2"}, + }, + }, + }, + } + + result := MergeSSBs(input) + if len(result.Merged) != 0 { + t.Errorf("expected no merged SSBs when schedules differ, got %d", len(result.Merged)) + } + if len(result.Problems) != 1 { + t.Fatalf("expected 1 problem, got %d", len(result.Problems)) + } + + problem := result.Problems[0] + if problem.Severity != models.SeverityError { + t.Errorf("expected error severity, got %v", problem.Severity) + } +} + +// TestSSBsUniqueToEachCluster verifies that SSBs unique to each cluster are +// not merged. +func TestSSBsUniqueToEachCluster(t *testing.T) { + input := map[string][]MappedSSB{ + "cluster-1": { + { + Name: "ssb-unique-1", + Profiles: []string{"profile-a"}, + Payload: models.ACSCreatePayload{ + ScanName: "ssb-unique-1", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"profile-a"}, + }, + Clusters: []string{"cluster-1"}, + }, + }, + }, + "cluster-2": { + { + Name: "ssb-unique-2", + Profiles: []string{"profile-b"}, + Payload: models.ACSCreatePayload{ + ScanName: "ssb-unique-2", + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"profile-b"}, + }, + Clusters: []string{"cluster-2"}, + }, + }, + }, + } + + result := MergeSSBs(input) + if len(result.Merged) != 2 { + t.Fatalf("expected 2 merged SSBs (unique ones not merged), got %d", len(result.Merged)) + } + + names := []string{result.Merged[0].Name, result.Merged[1].Name} + slices.Sort(names) + expected := []string{"ssb-unique-1", "ssb-unique-2"} + if !stringSlicesEqual(names, expected) { + t.Errorf("expected SSB names %v, got %v", expected, names) + } + + // Each should have only one cluster. + for _, merged := range result.Merged { + if len(merged.Payload.Clusters) != 1 { + t.Errorf("expected 1 cluster for unique SSB %q, got %d", merged.Name, len(merged.Payload.Clusters)) + } + } +} diff --git a/scripts/compliance-operator-importer/internal/models/models.go b/scripts/compliance-operator-importer/internal/models/models.go new file mode 100644 index 0000000000000..9dd1a2b617e2b --- /dev/null +++ b/scripts/compliance-operator-importer/internal/models/models.go @@ -0,0 +1,169 @@ +package models + +import ( + "context" + "time" +) + +// AuthMode controls which ACS authentication scheme the importer uses. +type AuthMode string + +const ( + AuthModeToken AuthMode = "token" + AuthModeBasic AuthMode = "basic" +) + +// Config holds all resolved configuration for a single importer run. +type Config struct { + ACSEndpoint string // from --endpoint or ROX_ENDPOINT + AuthMode AuthMode // auto-inferred from env vars (ROX_API_TOKEN / ROX_ADMIN_PASSWORD) + Username string // from --username or ROX_ADMIN_USER (default "admin") + CONamespace string // empty when COAllNamespaces=true + COAllNamespaces bool + ACSClusterID string // auto-discovered per context; set at runtime during iteration + DryRun bool + ReportJSON string + RequestTimeout time.Duration + MaxRetries int + CACertFile string + InsecureSkipVerify bool + OverwriteExisting bool + Contexts []string // opt-in --context filter; empty means all contexts +} + +// Severity classifies how severe a Problem is. +type Severity string + +const ( + SeverityError Severity = "error" + SeverityWarning Severity = "warning" +) + +// Category classifies what kind of issue a Problem represents. +type Category string + +const ( + CategoryInput Category = "input" + CategoryMapping Category = "mapping" + CategoryConflict Category = "conflict" + CategoryAuth Category = "auth" + CategoryAPI Category = "api" + CategoryRetry Category = "retry" + CategoryValidation Category = "validation" +) + +// Problem is a structured issue entry recorded during an importer run. +type Problem struct { + Severity Severity `json:"severity"` + Category Category `json:"category"` + ResourceRef string `json:"resourceRef"` // "namespace/name" or synthetic + Description string `json:"description"` + FixHint string `json:"fixHint"` + Skipped bool `json:"skipped"` +} + +// ACSSchedule is the schedule portion of an ACS scan configuration. +// Fields map to the v2.Schedule proto message in proto/api/v2/common.proto. +type ACSSchedule struct { + IntervalType string `json:"intervalType,omitempty"` + Hour int32 `json:"hour"` + Minute int32 `json:"minute"` + DaysOfWeek *ACSDaysOfWeek `json:"daysOfWeek,omitempty"` + DaysOfMonth *ACSDaysOfMonth `json:"daysOfMonth,omitempty"` +} + +// ACSDaysOfWeek holds days for a weekly ACS schedule (Sunday=0 .. Saturday=6). +type ACSDaysOfWeek struct { + Days []int32 `json:"days"` +} + +// ACSDaysOfMonth holds days for a monthly ACS schedule. +type ACSDaysOfMonth struct { + Days []int32 `json:"days"` +} + +// ACSBaseScanConfig is the scanConfig sub-object in an ACS create payload. +type ACSBaseScanConfig struct { + OneTimeScan bool `json:"oneTimeScan"` + Profiles []string `json:"profiles"` + ScanSchedule *ACSSchedule `json:"scanSchedule,omitempty"` + Description string `json:"description"` +} + +// ACSCreatePayload is the request body for POST /v2/compliance/scan/configurations +// and PUT /v2/compliance/scan/configurations/{id}. +type ACSCreatePayload struct { + ScanName string `json:"scanName"` + ScanConfig ACSBaseScanConfig `json:"scanConfig"` + Clusters []string `json:"clusters"` +} + +// ACSConfigSummary is a single entry from the ACS list response. +type ACSConfigSummary struct { + ID string `json:"id"` + ScanName string `json:"scanName"` +} + +// ACSListResponse matches the JSON from GET /v2/compliance/scan/configurations. +type ACSListResponse struct { + Configurations []ACSConfigSummary `json:"configurations"` + TotalCount int32 `json:"totalCount"` +} + +// ReportMeta is metadata written at the top of the JSON report. +type ReportMeta struct { + Timestamp string `json:"timestamp"` + DryRun bool `json:"dryRun"` + NamespaceScope string `json:"namespaceScope"` + Mode string `json:"mode"` // always "create-only" +} + +// ReportCounts summarises action totals for the JSON report. +type ReportCounts struct { + Discovered int `json:"discovered"` + Create int `json:"create"` + Update int `json:"update"` + Skip int `json:"skip"` + Failed int `json:"failed"` +} + +// ReportItemSource identifies the CO source for one report item. +type ReportItemSource struct { + Namespace string `json:"namespace"` + BindingName string `json:"bindingName"` + ScanSettingName string `json:"scanSettingName"` +} + +// ReportItem records the outcome for one ScanSettingBinding. +type ReportItem struct { + Source ReportItemSource `json:"source"` + Action string `json:"action"` // create|skip|fail + Reason string `json:"reason"` + Attempts int `json:"attempts"` + ACSScanConfigID string `json:"acsScanConfigId,omitempty"` + Error string `json:"error,omitempty"` +} + +// Report is the top-level structure written to --report-json. +type Report struct { + Meta ReportMeta `json:"meta"` + Counts ReportCounts `json:"counts"` + Items []ReportItem `json:"items"` + Problems []Problem `json:"problems"` +} + +// ACSClusterInfo represents a cluster managed by ACS. +type ACSClusterInfo struct { + ID string // ACS cluster UUID + Name string // cluster display name + ProviderClusterID string // from status.providerMetadata.cluster.id (e.g. OpenShift cluster ID) +} + +// ACSClient is the interface for ACS API operations. +type ACSClient interface { + Preflight(ctx context.Context) error + ListScanConfigurations(ctx context.Context) ([]ACSConfigSummary, error) + CreateScanConfiguration(ctx context.Context, payload ACSCreatePayload) (string, error) + UpdateScanConfiguration(ctx context.Context, id string, payload ACSCreatePayload) error + ListClusters(ctx context.Context) ([]ACSClusterInfo, error) +} diff --git a/scripts/compliance-operator-importer/internal/preflight/preflight.go b/scripts/compliance-operator-importer/internal/preflight/preflight.go new file mode 100644 index 0000000000000..dbf2fef57822f --- /dev/null +++ b/scripts/compliance-operator-importer/internal/preflight/preflight.go @@ -0,0 +1,159 @@ +// Package preflight verifies that the ACS endpoint is reachable and the +// supplied credentials are accepted before any resource processing begins. +package preflight + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/base64" + "errors" + "fmt" + "net/http" + "os" + "strings" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +const preflightPath = "/v2/compliance/scan/configurations?pagination.limit=1" + +// Run performs preflight checks in order: +// 1. Verify endpoint uses https:// (IMP-CLI-013). +// 2. Verify auth material is non-empty for the inferred mode (IMP-CLI-014). +// 3. Probe GET /v2/compliance/scan/configurations?pagination.limit=1 (IMP-CLI-015). +// 4. HTTP 401/403 => fail-fast with a remediation message (IMP-CLI-016). +// +// Returns nil on success, or an error with a remediation hint on failure. +func Run(ctx context.Context, cfg *models.Config) error { + // IMP-CLI-013: endpoint must be https://. + if !strings.HasPrefix(cfg.ACSEndpoint, "https://") { + return fmt.Errorf( + "preflight failed: endpoint %q must start with https://\n"+ + "Fix: use --endpoint https://", + cfg.ACSEndpoint, + ) + } + + // IMP-CLI-014: auth material must be non-empty. + if err := checkAuthMaterial(cfg); err != nil { + return err + } + + client, err := buildHTTPClient(cfg) + if err != nil { + return fmt.Errorf("preflight failed: could not build HTTP client: %w", err) + } + + url := cfg.ACSEndpoint + preflightPath + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil) + if err != nil { + return fmt.Errorf("preflight failed: could not build request: %w", err) + } + + addAuthHeader(req, cfg) + + resp, err := client.Do(req) + if err != nil { + fix := "Fix: check network connectivity and that --endpoint is correct" + var certErr *tls.CertificateVerificationError + if errors.As(err, &certErr) { + fix = "Fix: if ACS uses a self-signed certificate, supply the CA with --ca-cert-file " + + "or pass --insecure-skip-verify to disable TLS verification" + } + return fmt.Errorf( + "preflight failed: could not reach ACS at %s: %w\n%s", + cfg.ACSEndpoint, err, fix, + ) + } + defer resp.Body.Close() + + // IMP-CLI-015: success only on HTTP 200. + // IMP-CLI-016: 401/403 => fail-fast with remediation message. + switch resp.StatusCode { + case http.StatusOK: + return nil + case http.StatusUnauthorized: + return errors.New( + "preflight failed: ACS returned 401 Unauthorized\n" + + "Fix: verify your ACS API token or credentials are correct and not expired", + ) + case http.StatusForbidden: + return errors.New( + "preflight failed: ACS returned 403 Forbidden\n" + + "Fix: ensure your ACS user has the 'Compliance' permission set with at least read access", + ) + default: + return fmt.Errorf( + "preflight failed: ACS returned unexpected status %d from %s\n"+ + "Fix: verify the ACS endpoint is correct and the service is healthy", + resp.StatusCode, url, + ) + } +} + +// checkAuthMaterial validates that the auth credentials for the inferred +// mode are non-empty (IMP-CLI-014). +func checkAuthMaterial(cfg *models.Config) error { + switch cfg.AuthMode { + case models.AuthModeToken: + if os.Getenv("ROX_API_TOKEN") == "" { + return errors.New( + "preflight failed: token auth mode requires a non-empty ROX_API_TOKEN\n" + + "Fix: export ROX_API_TOKEN=", + ) + } + case models.AuthModeBasic: + if cfg.Username == "" { + return errors.New( + "preflight failed: basic auth mode requires a non-empty username\n" + + "Fix: pass --username= or set ROX_ADMIN_USER=", + ) + } + if os.Getenv("ROX_ADMIN_PASSWORD") == "" { + return errors.New( + "preflight failed: basic auth mode requires a non-empty ROX_ADMIN_PASSWORD\n" + + "Fix: export ROX_ADMIN_PASSWORD=", + ) + } + } + return nil +} + +// buildHTTPClient constructs an HTTP client with the TLS settings from cfg. +func buildHTTPClient(cfg *models.Config) (*http.Client, error) { + tlsCfg := &tls.Config{ + InsecureSkipVerify: cfg.InsecureSkipVerify, //nolint:gosec // controlled by explicit flag + } + + if cfg.CACertFile != "" { + pem, err := os.ReadFile(cfg.CACertFile) + if err != nil { + return nil, fmt.Errorf("reading CA cert file %q: %w", cfg.CACertFile, err) + } + pool := x509.NewCertPool() + if !pool.AppendCertsFromPEM(pem) { + return nil, fmt.Errorf("CA cert file %q contains no valid PEM certificates", cfg.CACertFile) + } + tlsCfg.RootCAs = pool + } + + transport := &http.Transport{TLSClientConfig: tlsCfg} + return &http.Client{ + Transport: transport, + Timeout: cfg.RequestTimeout, + }, nil +} + +// addAuthHeader sets the Authorization header on req according to cfg.AuthMode. +func addAuthHeader(req *http.Request, cfg *models.Config) { + switch cfg.AuthMode { + case models.AuthModeToken: + token := os.Getenv("ROX_API_TOKEN") + req.Header.Set("Authorization", "Bearer "+token) + case models.AuthModeBasic: + password := os.Getenv("ROX_ADMIN_PASSWORD") + creds := base64.StdEncoding.EncodeToString([]byte(cfg.Username + ":" + password)) + req.Header.Set("Authorization", "Basic "+creds) + } +} diff --git a/scripts/compliance-operator-importer/internal/preflight/preflight_test.go b/scripts/compliance-operator-importer/internal/preflight/preflight_test.go new file mode 100644 index 0000000000000..ba2bee9524e97 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/preflight/preflight_test.go @@ -0,0 +1,234 @@ +package preflight + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// minimalTokenConfig returns a Config wired to the given server URL in token mode. +// Caller must set ROX_API_TOKEN env var. +func minimalTokenConfig(serverURL string) *models.Config { + return &models.Config{ + ACSEndpoint: serverURL, + AuthMode: models.AuthModeToken, + RequestTimeout: 5 * time.Second, + } +} + +// TestIMP_CLI_015_200ResponseNoError verifies that a 200 response from the +// preflight probe returns nil (IMP-CLI-015). +func TestIMP_CLI_015_200ResponseNoError(t *testing.T) { + srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.URL.Path, "/v2/compliance/scan/configurations") { + http.NotFound(w, r) + return + } + w.WriteHeader(http.StatusOK) + })) + defer srv.Close() + + t.Setenv("ROX_API_TOKEN", "validtoken") + + cfg := minimalTokenConfig(srv.URL) + cfg.InsecureSkipVerify = true + + err := Run(context.Background(), cfg) + if err != nil { + t.Fatalf("expected nil error for 200 response, got: %v", err) + } +} + +// TestIMP_CLI_016_401ReturnsRemediationError verifies that a 401 response +// causes a fail-fast error with remediation text (IMP-CLI-016). +func TestIMP_CLI_016_401ReturnsRemediationError(t *testing.T) { + srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusUnauthorized) + })) + defer srv.Close() + + t.Setenv("ROX_API_TOKEN", "badtoken") + + cfg := minimalTokenConfig(srv.URL) + cfg.InsecureSkipVerify = true + + err := Run(context.Background(), cfg) + if err == nil { + t.Fatal("expected error for 401 response, got nil") + } + msg := err.Error() + if !strings.Contains(strings.ToLower(msg), "unauthorized") && !strings.Contains(strings.ToLower(msg), "401") { + t.Errorf("expected 'unauthorized' or '401' in error message, got: %q", msg) + } + if !strings.Contains(strings.ToLower(msg), "fix:") { + t.Errorf("expected remediation hint (Fix:) in error message, got: %q", msg) + } +} + +// TestIMP_CLI_016_403ReturnsRemediationError verifies that a 403 response +// causes a fail-fast error with remediation text (IMP-CLI-016). +func TestIMP_CLI_016_403ReturnsRemediationError(t *testing.T) { + srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusForbidden) + })) + defer srv.Close() + + t.Setenv("ROX_API_TOKEN", "insufficienttoken") + + cfg := minimalTokenConfig(srv.URL) + cfg.InsecureSkipVerify = true + + err := Run(context.Background(), cfg) + if err == nil { + t.Fatal("expected error for 403 response, got nil") + } + msg := err.Error() + if !strings.Contains(strings.ToLower(msg), "forbidden") && !strings.Contains(strings.ToLower(msg), "403") { + t.Errorf("expected 'forbidden' or '403' in error message, got: %q", msg) + } + if !strings.Contains(strings.ToLower(msg), "fix:") { + t.Errorf("expected remediation hint (Fix:) in error message, got: %q", msg) + } +} + +// TestTLSCertErrorHintsSelfSigned verifies that when the server presents a +// certificate not trusted by the client, the error message hints at +// --ca-cert-file and --insecure-skip-verify rather than network connectivity. +func TestTLSCertErrorHintsSelfSigned(t *testing.T) { + srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + w.WriteHeader(http.StatusOK) + })) + defer srv.Close() + + t.Setenv("ROX_API_TOKEN", "tok") + + cfg := minimalTokenConfig(srv.URL) + // InsecureSkipVerify defaults to false — the self-signed cert will fail. + + err := Run(context.Background(), cfg) + if err == nil { + t.Fatal("expected TLS error, got nil") + } + msg := err.Error() + if !strings.Contains(msg, "--ca-cert-file") { + t.Errorf("expected hint about --ca-cert-file, got: %q", msg) + } + if !strings.Contains(msg, "--insecure-skip-verify") { + t.Errorf("expected hint about --insecure-skip-verify, got: %q", msg) + } + if strings.Contains(msg, "check network connectivity") { + t.Errorf("should not suggest network connectivity for TLS cert error, got: %q", msg) + } +} + +// TestIMP_CLI_013_NonHTTPSEndpointRejected verifies that a non-https endpoint +// is rejected before any network call is made (IMP-CLI-013). +func TestIMP_CLI_013_NonHTTPSEndpointRejected(t *testing.T) { + t.Setenv("ROX_API_TOKEN", "tok") + + cfg := &models.Config{ + ACSEndpoint: "http://central.example.com", + AuthMode: models.AuthModeToken, + RequestTimeout: 5 * time.Second, + } + + err := Run(context.Background(), cfg) + if err == nil { + t.Fatal("expected error for non-https endpoint, got nil") + } + if !strings.Contains(err.Error(), "https://") { + t.Errorf("expected error to mention https://, got: %q", err.Error()) + } +} + +// TestIMP_CLI_014_EmptyTokenRejected verifies that an empty token in token +// mode is caught before any HTTP request (IMP-CLI-014). +func TestIMP_CLI_014_EmptyTokenRejected(t *testing.T) { + t.Setenv("ROX_API_TOKEN", "") + + cfg := &models.Config{ + ACSEndpoint: "https://central.example.com", + AuthMode: models.AuthModeToken, + RequestTimeout: 5 * time.Second, + } + + err := Run(context.Background(), cfg) + if err == nil { + t.Fatal("expected error for empty token, got nil") + } + if !strings.Contains(strings.ToLower(err.Error()), "token") { + t.Errorf("expected error message to mention token, got: %q", err.Error()) + } +} + +// TestIMP_CLI_014_BasicModeEmptyPasswordRejected verifies that basic mode with +// an empty password is rejected before any HTTP request (IMP-CLI-014). +func TestIMP_CLI_014_BasicModeEmptyPasswordRejected(t *testing.T) { + t.Setenv("ROX_ADMIN_PASSWORD", "") + + cfg := &models.Config{ + ACSEndpoint: "https://central.example.com", + AuthMode: models.AuthModeBasic, + Username: "admin", + RequestTimeout: 5 * time.Second, + } + + err := Run(context.Background(), cfg) + if err == nil { + t.Fatal("expected error for empty password in basic mode, got nil") + } +} + +// TestIMP_CLI_015_ProbesCorrectPath verifies that the preflight probe sends +// a request to the expected API path (IMP-CLI-015). +func TestIMP_CLI_015_ProbesCorrectPath(t *testing.T) { + var capturedPath string + srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + capturedPath = r.URL.RequestURI() + w.WriteHeader(http.StatusOK) + })) + defer srv.Close() + + t.Setenv("ROX_API_TOKEN", "tok") + + cfg := minimalTokenConfig(srv.URL) + cfg.InsecureSkipVerify = true + + if err := Run(context.Background(), cfg); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + expectedPath := "/v2/compliance/scan/configurations?pagination.limit=1" + if capturedPath != expectedPath { + t.Errorf("expected probe path %q, got %q", expectedPath, capturedPath) + } +} + +// TestIMP_CLI_015_BearerTokenSentInHeader verifies that the Authorization +// header is set to "Bearer " in token mode. +func TestIMP_CLI_015_BearerTokenSentInHeader(t *testing.T) { + var capturedAuth string + srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + capturedAuth = r.Header.Get("Authorization") + w.WriteHeader(http.StatusOK) + })) + defer srv.Close() + + t.Setenv("ROX_API_TOKEN", "my-secret-token") + + cfg := minimalTokenConfig(srv.URL) + cfg.InsecureSkipVerify = true + + if err := Run(context.Background(), cfg); err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if capturedAuth != "Bearer my-secret-token" { + t.Errorf("expected Authorization 'Bearer my-secret-token', got %q", capturedAuth) + } +} diff --git a/scripts/compliance-operator-importer/internal/problems/problems.go b/scripts/compliance-operator-importer/internal/problems/problems.go new file mode 100644 index 0000000000000..4d016ebd625bb --- /dev/null +++ b/scripts/compliance-operator-importer/internal/problems/problems.go @@ -0,0 +1,44 @@ +// Package problems provides a Collector that accumulates Problem entries +// during an importer run. All collected problems are included in the final +// JSON report and used to determine the process exit code. +package problems + +import "github.com/stackrox/co-acs-importer/internal/models" + +// Collector accumulates problems during a run. +// It is not safe for concurrent use; callers must synchronise externally if needed. +type Collector struct { + problems []models.Problem +} + +// NewCollector returns an empty Collector ready for use. +func NewCollector() *Collector { + return &Collector{} +} + +// Add appends p to the collected problem list. +// Both Description and FixHint must be non-empty to satisfy IMP-CLI-022. +func (c *Collector) Add(p models.Problem) { + c.problems = append(c.problems, p) +} + +// All returns a copy of all collected problems in insertion order. +func (c *Collector) All() []models.Problem { + if len(c.problems) == 0 { + return []models.Problem{} + } + out := make([]models.Problem, len(c.problems)) + copy(out, c.problems) + return out +} + +// HasErrors returns true if at least one collected problem has severity "error". +// Used to determine whether the run should exit with code 2 (IMP-CLI-019). +func (c *Collector) HasErrors() bool { + for _, p := range c.problems { + if p.Severity == models.SeverityError { + return true + } + } + return false +} diff --git a/scripts/compliance-operator-importer/internal/problems/problems_test.go b/scripts/compliance-operator-importer/internal/problems/problems_test.go new file mode 100644 index 0000000000000..ad0f4a02257a0 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/problems/problems_test.go @@ -0,0 +1,136 @@ +package problems_test + +import ( + "testing" + + "github.com/stackrox/co-acs-importer/internal/models" + "github.com/stackrox/co-acs-importer/internal/problems" +) + +// TestIMP_CLI_022_AddAndAllRoundtrip verifies that problems added to the Collector +// are returned verbatim by All(), preserving insertion order. +// Requirement: IMP-CLI-022 (problems[] entry appended for every problem). +func TestIMP_CLI_022_AddAndAllRoundtrip(t *testing.T) { + c := problems.NewCollector() + + p1 := models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryAPI, + ResourceRef: "ns/binding-a", + Description: "API returned 503", + FixHint: "Retry later or check ACS endpoint.", + Skipped: true, + } + p2 := models.Problem{ + Severity: models.SeverityWarning, + Category: models.CategoryConflict, + ResourceRef: "ns/binding-b", + Description: "Scan config already exists", + FixHint: "Delete or rename the existing config.", + Skipped: true, + } + + c.Add(p1) + c.Add(p2) + + got := c.All() + if len(got) != 2 { + t.Fatalf("expected 2 problems, got %d", len(got)) + } + if got[0] != p1 { + t.Errorf("first problem mismatch: got %+v, want %+v", got[0], p1) + } + if got[1] != p2 { + t.Errorf("second problem mismatch: got %+v, want %+v", got[1], p2) + } +} + +// TestIMP_CLI_022_EmptyCollectorAllReturnsEmptySlice verifies that a fresh +// Collector returns an empty (non-nil) slice from All(). +func TestIMP_CLI_022_EmptyCollectorAllReturnsEmptySlice(t *testing.T) { + c := problems.NewCollector() + got := c.All() + if got == nil { + t.Fatal("All() returned nil; want empty non-nil slice") + } + if len(got) != 0 { + t.Fatalf("expected 0 problems, got %d", len(got)) + } +} + +// TestIMP_CLI_022_HasErrorsFalseWhenOnlyWarnings verifies that HasErrors returns +// false when only warning-severity problems are present. +// Requirement: IMP-CLI-022 (severity classification). +func TestIMP_CLI_022_HasErrorsFalseWhenOnlyWarnings(t *testing.T) { + c := problems.NewCollector() + c.Add(models.Problem{ + Severity: models.SeverityWarning, + Category: models.CategoryConflict, + ResourceRef: "ns/binding-c", + Description: "Scan config already exists", + FixHint: "Delete the existing config and re-run.", + Skipped: true, + }) + if c.HasErrors() { + t.Error("HasErrors() returned true; expected false when only warnings present") + } +} + +// TestIMP_CLI_022_HasErrorsTrueWhenAnyErrorSeverity verifies that HasErrors +// returns true as soon as any error-severity problem is added. +// Requirement: IMP-CLI-022 (severity classification drives exit code logic). +func TestIMP_CLI_022_HasErrorsTrueWhenAnyErrorSeverity(t *testing.T) { + c := problems.NewCollector() + // Add a warning first to ensure we check all entries, not just the last. + c.Add(models.Problem{ + Severity: models.SeverityWarning, + Category: models.CategoryMapping, + ResourceRef: "ns/binding-d", + Description: "Schedule conversion warning", + FixHint: "Use a standard cron expression.", + Skipped: false, + }) + c.Add(models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryAPI, + ResourceRef: "ns/binding-e", + Description: "API returned 400 Bad Request", + FixHint: "Check that the payload is valid and the cluster ID exists.", + Skipped: true, + }) + if !c.HasErrors() { + t.Error("HasErrors() returned false; expected true when error-severity problem is present") + } +} + +// TestIMP_CLI_022_HasErrorsFalseOnEmptyCollector verifies that an empty +// Collector reports no errors. +func TestIMP_CLI_022_HasErrorsFalseOnEmptyCollector(t *testing.T) { + c := problems.NewCollector() + if c.HasErrors() { + t.Error("HasErrors() returned true on empty collector; expected false") + } +} + +// TestIMP_CLI_022_AllReturnsCopy verifies that mutations to the returned slice +// do not affect the Collector's internal state. +func TestIMP_CLI_022_AllReturnsCopy(t *testing.T) { + c := problems.NewCollector() + c.Add(models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryAPI, + ResourceRef: "ns/binding-f", + Description: "Transient API failure", + FixHint: "Increase --max-retries or check ACS health.", + Skipped: true, + }) + + got := c.All() + got[0].Description = "mutated" + + // Second call must return the original value. + fresh := c.All() + if fresh[0].Description == "mutated" { + t.Error("All() returned a reference to internal state; expected an independent copy") + } +} diff --git a/scripts/compliance-operator-importer/internal/reconcile/create_only.go b/scripts/compliance-operator-importer/internal/reconcile/create_only.go new file mode 100644 index 0000000000000..e572b537e53eb --- /dev/null +++ b/scripts/compliance-operator-importer/internal/reconcile/create_only.go @@ -0,0 +1,265 @@ +// Package reconcile implements the reconciliation loop that can either create-only +// or create-or-update scan configurations based on the overwriteExisting setting. +package reconcile + +import ( + "context" + "fmt" + "time" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// transientStatusCodes is the set of HTTP status codes that should trigger a retry. +// Non-transient codes (400, 401, 403, 404) are NOT in this set and cause immediate failure. +// +// Implements IMP-ERR-001 (retry) and IMP-ERR-002 (no retry). +var transientStatusCodes = map[int]bool{ + 429: true, + 502: true, + 503: true, + 504: true, +} + +// statusCoder is the interface satisfied by acs.HTTPError (and the test statusError). +// It lets the reconciler inspect the HTTP status without importing the acs package, +// avoiding an import cycle. +type statusCoder interface { + StatusCode() int +} + +// Action records the outcome of a single Apply call. +type Action struct { + Source models.ReportItemSource + ActionType string // "create" | "skip" | "fail" + Reason string + Attempts int + ACSScanConfigID string + Err error + Problem *models.Problem +} + +// Reconciler implements the reconciliation loop. +// When overwriteExisting=false, existing scan names are skipped with a conflict problem. +// When overwriteExisting=true, existing scan names are updated via PUT. +type Reconciler struct { + client models.ACSClient + maxRetries int + dryRun bool + overwriteExisting bool +} + +// NewReconciler creates a Reconciler. +// +// - client: ACS API client supporting both POST and PUT operations +// - maxRetries: maximum total attempts for a single create/update (must be >= 1) +// - dryRun: when true, no POST/PUT is issued; planned actions are still recorded +// - overwriteExisting: when true, existing configs are updated via PUT instead of skipped +func NewReconciler(client models.ACSClient, maxRetries int, dryRun bool, overwriteExisting bool) *Reconciler { + if maxRetries < 1 { + maxRetries = 1 + } + return &Reconciler{ + client: client, + maxRetries: maxRetries, + dryRun: dryRun, + overwriteExisting: overwriteExisting, + } +} + +// Apply tries to create or update the scan config based on whether scanName exists in existingNames. +// +// Behaviour: +// - If dryRun=true: records planned action, no POST/PUT is issued. (IMP-IDEM-004, IMP-IDEM-006) +// - If scanName exists and overwriteExisting=false: skip + conflict problem. (IMP-IDEM-002, IMP-IDEM-003) +// - If scanName exists and overwriteExisting=true: update via PUT. (IMP-IDEM-008) +// - If scanName not exists: create via POST regardless of overwriteExisting. (IMP-IDEM-009) +// - Transient failures (429,502,503,504): retry with exponential backoff. (IMP-ERR-001) +// - Non-transient failures (400,401,403,404): record as fail immediately. (IMP-ERR-002) +// +// Exponential backoff: base=500ms, doubles each retry; up to maxRetries total attempts. +// Attempts count is always recorded in the returned Action. +// +// existingNames maps scanName -> configID so we know the ID for PUT operations. +func (r *Reconciler) Apply( + ctx context.Context, + payload models.ACSCreatePayload, + source models.ReportItemSource, + existingNames map[string]string, +) Action { + action := Action{Source: source} + + existingID, nameExists := existingNames[payload.ScanName] + + // Handle existing name based on overwriteExisting setting + if nameExists { + if !r.overwriteExisting { + // IMP-IDEM-002: existing name and overwriteExisting=false => skip with conflict problem + // IMP-IDEM-003: no PUT is attempted when overwriteExisting=false + problem := &models.Problem{ + Severity: models.SeverityWarning, + Category: models.CategoryConflict, + ResourceRef: resourceRef(source), + Description: fmt.Sprintf("scan configuration %q already exists in ACS and will not be updated (create-only mode)", payload.ScanName), + FixHint: fmt.Sprintf("Remove the existing ACS scan configuration named %q before re-running, or use --overwrite-existing flag, or rename the ScanSettingBinding to use a different name.", payload.ScanName), + Skipped: true, + } + action.ActionType = "skip" + action.Reason = fmt.Sprintf("scan configuration %q already exists in ACS", payload.ScanName) + action.Problem = problem + return action + } + + // IMP-IDEM-008: overwriteExisting=true and name exists => update via PUT + if r.dryRun { + action.ActionType = "update" + action.ACSScanConfigID = existingID + action.Reason = "dry-run: would PUT /v2/compliance/scan/configurations/" + existingID + action.Attempts = 0 + return action + } + + // Perform update with retry logic + var ( + lastErr error + delay = 500 * time.Millisecond + ) + + for attempt := 1; attempt <= r.maxRetries; attempt++ { + action.Attempts = attempt + + lastErr = r.client.UpdateScanConfiguration(ctx, existingID, payload) + if lastErr == nil { + action.ActionType = "update" + action.ACSScanConfigID = existingID + action.Reason = "scan configuration updated successfully" + return action + } + + // Check if the error is transient (eligible for retry) + if sc, ok := asStatusCoder(lastErr); ok { + code := sc.StatusCode() + if !transientStatusCodes[code] { + // Non-transient: fail immediately, no more attempts + action.ActionType = "fail" + action.Reason = fmt.Sprintf("non-transient HTTP %d error updating scan configuration: %v", code, lastErr) + action.Err = lastErr + return action + } + } + + // Do not sleep after the last attempt + if attempt < r.maxRetries { + select { + case <-ctx.Done(): + action.ActionType = "fail" + action.Reason = "context cancelled during retry backoff" + action.Err = ctx.Err() + return action + case <-time.After(delay): + } + delay *= 2 + } + } + + // Exhausted all retries for update + action.ActionType = "fail" + action.Reason = fmt.Sprintf("failed to update after %d attempt(s): %v", action.Attempts, lastErr) + action.Err = lastErr + return action + } + + // IMP-IDEM-009: name not exists => create via POST regardless of overwriteExisting flag + // IMP-IDEM-004: dry-run => record planned action, do not POST + // IMP-IDEM-006: planned action "create" is still recorded + if r.dryRun { + action.ActionType = "create" + action.Reason = "dry-run: would POST /v2/compliance/scan/configurations" + action.Attempts = 0 + return action + } + + // IMP-IDEM-001: POST /v2/compliance/scan/configurations when name not found + // IMP-ERR-001: retry on transient errors with exponential backoff + // IMP-ERR-002: no retry on non-transient errors + var ( + lastErr error + id string + delay = 500 * time.Millisecond + ) + + for attempt := 1; attempt <= r.maxRetries; attempt++ { + action.Attempts = attempt + + id, lastErr = r.client.CreateScanConfiguration(ctx, payload) + if lastErr == nil { + action.ActionType = "create" + action.ACSScanConfigID = id + action.Reason = "scan configuration created successfully" + return action + } + + // Check if the error is transient (eligible for retry) + if sc, ok := asStatusCoder(lastErr); ok { + code := sc.StatusCode() + if !transientStatusCodes[code] { + // Non-transient: fail immediately, no more attempts + action.ActionType = "fail" + action.Reason = fmt.Sprintf("non-transient HTTP %d error creating scan configuration: %v", code, lastErr) + action.Err = lastErr + return action + } + } else { + // Unknown error type (e.g. network error): treat as transient and retry + } + + // Do not sleep after the last attempt + if attempt < r.maxRetries { + select { + case <-ctx.Done(): + action.ActionType = "fail" + action.Reason = "context cancelled during retry backoff" + action.Err = ctx.Err() + return action + case <-time.After(delay): + } + delay *= 2 + } + } + + // Exhausted all retries + action.ActionType = "fail" + action.Reason = fmt.Sprintf("failed after %d attempt(s): %v", action.Attempts, lastErr) + action.Err = lastErr + return action +} + +// resourceRef formats the source as "namespace/bindingName" for use in Problem.ResourceRef. +func resourceRef(source models.ReportItemSource) string { + if source.Namespace == "" { + return source.BindingName + } + return source.Namespace + "/" + source.BindingName +} + +// asStatusCoder attempts to extract a statusCoder from err using errors.As-style +// type assertion. It handles both direct and wrapped errors. +func asStatusCoder(err error) (statusCoder, bool) { + // Direct type assertion first (most common path) + if sc, ok := err.(statusCoder); ok { + return sc, true + } + // Unwrap chain + type unwrapper interface{ Unwrap() error } + for err != nil { + if sc, ok := err.(statusCoder); ok { + return sc, true + } + uw, ok := err.(unwrapper) + if !ok { + break + } + err = uw.Unwrap() + } + return nil, false +} diff --git a/scripts/compliance-operator-importer/internal/reconcile/create_only_test.go b/scripts/compliance-operator-importer/internal/reconcile/create_only_test.go new file mode 100644 index 0000000000000..244954beac9f0 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/reconcile/create_only_test.go @@ -0,0 +1,403 @@ +package reconcile_test + +import ( + "context" + "errors" + "fmt" + "sync/atomic" + "testing" + + "github.com/stackrox/co-acs-importer/internal/models" + "github.com/stackrox/co-acs-importer/internal/reconcile" +) + +// --------------------------------------------------------------------------- +// Mock ACS client +// --------------------------------------------------------------------------- + +// mockACSClient is a test double that records every call and allows the caller +// to inject per-call responses via the nextResponses queue. +type mockACSClient struct { + // createResponses is consumed in order; each entry is either nil (success) + // or an error. Use statusError to encode HTTP status codes. + createResponses []error + + // updateResponses is consumed in order for PUT calls. + updateResponses []error + + // callCount tracks total calls to CreateScanConfiguration. + callCount atomic.Int32 + + // updateCallCount tracks total calls to UpdateScanConfiguration. + updateCallCount atomic.Int32 + + // recordedIDCounter is used to return unique IDs on success. + idCounter atomic.Int32 + + // listConfigs is the fixed list returned by ListScanConfigurations. + listConfigs []models.ACSConfigSummary +} + +// statusError wraps an HTTP status code so the reconciler can distinguish +// transient (429/502/503/504) from non-transient (400/401/403/404) failures. +type statusError struct { + code int +} + +func (e *statusError) Error() string { return fmt.Sprintf("HTTP %d", e.code) } +func (e *statusError) StatusCode() int { return e.code } + +func (m *mockACSClient) Preflight(_ context.Context) error { return nil } + +func (m *mockACSClient) ListScanConfigurations(_ context.Context) ([]models.ACSConfigSummary, error) { + return m.listConfigs, nil +} + +func (m *mockACSClient) CreateScanConfiguration(_ context.Context, _ models.ACSCreatePayload) (string, error) { + idx := int(m.callCount.Add(1)) - 1 + if idx < len(m.createResponses) { + if err := m.createResponses[idx]; err != nil { + return "", err + } + } + id := fmt.Sprintf("created-id-%d", m.idCounter.Add(1)) + return id, nil +} + +func (m *mockACSClient) UpdateScanConfiguration(_ context.Context, _ string, _ models.ACSCreatePayload) error { + idx := int(m.updateCallCount.Add(1)) - 1 + if idx < len(m.updateResponses) { + if err := m.updateResponses[idx]; err != nil { + return err + } + } + return nil +} + +func (m *mockACSClient) ListClusters(_ context.Context) ([]models.ACSClusterInfo, error) { + // Not used in reconcile tests, return empty list + return []models.ACSClusterInfo{}, nil +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +func defaultSource() models.ReportItemSource { + return models.ReportItemSource{ + Namespace: "openshift-compliance", + BindingName: "cis-weekly", + ScanSettingName: "default-auto-apply", + } +} + +func defaultPayload(scanName string) models.ACSCreatePayload { + return models.ACSCreatePayload{ + ScanName: scanName, + ScanConfig: models.ACSBaseScanConfig{ + Profiles: []string{"ocp4-cis"}, + Description: "test", + }, + Clusters: []string{"cluster-a"}, + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +// IMP-IDEM-001: non-existing name => POST called, action="create" +func TestApply_IMP_IDEM_001_NewName_CreatesConfig(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, false, false) + + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]string{}) + + if action.ActionType != "create" { + t.Errorf("IMP-IDEM-001: expected action 'create', got %q", action.ActionType) + } + if action.ACSScanConfigID == "" { + t.Error("IMP-IDEM-001: expected non-empty ACSScanConfigID after create") + } + if action.Err != nil { + t.Errorf("IMP-IDEM-001: unexpected error: %v", action.Err) + } + if mock.callCount.Load() != 1 { + t.Errorf("IMP-IDEM-001: expected 1 POST call, got %d", mock.callCount.Load()) + } +} + +// IMP-IDEM-002: existing name => action="skip", Problem.Category=conflict, FixHint non-empty +func TestApply_IMP_IDEM_002_ExistingName_SkipsWithConflictProblem(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, false, false) + + existing := map[string]string{"cis-weekly": "existing-id-123"} + action := r.Apply(context.Background(), defaultPayload("cis-weekly"), defaultSource(), existing) + + if action.ActionType != "skip" { + t.Errorf("IMP-IDEM-002: expected action 'skip', got %q", action.ActionType) + } + if action.Problem == nil { + t.Fatal("IMP-IDEM-002: expected Problem to be non-nil for skipped-existing") + } + if action.Problem.Category != models.CategoryConflict { + t.Errorf("IMP-IDEM-002: expected Problem.Category 'conflict', got %q", action.Problem.Category) + } + if action.Problem.FixHint == "" { + t.Error("IMP-IDEM-002: expected non-empty Problem.FixHint") + } + if action.Reason == "" { + t.Error("IMP-IDEM-002: expected non-empty Reason") + } + // "already exists" must appear in the reason (per spec) + if !containsSubstring(action.Reason, "already exists") { + t.Errorf("IMP-IDEM-002: Reason must include 'already exists', got %q", action.Reason) + } +} + +// IMP-IDEM-003: verify no PUT called when overwriteExisting=false (default mode) +func TestApply_IMP_IDEM_003_DefaultMode_NoPUT(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, false, false) // overwriteExisting=false + + // Run multiple scenarios - none should trigger a PUT + for _, scanName := range []string{"new-scan-1", "new-scan-2"} { + _ = r.Apply(context.Background(), defaultPayload(scanName), defaultSource(), map[string]string{}) + } + // existing name - should skip, not PUT + _ = r.Apply(context.Background(), defaultPayload("existing"), defaultSource(), map[string]string{"existing": "existing-id"}) + + // 2 creates + 1 skip = 2 POST calls total, 0 PUT calls + if mock.callCount.Load() != 2 { + t.Errorf("IMP-IDEM-003: expected exactly 2 POST calls (2 creates), got %d", mock.callCount.Load()) + } + if mock.updateCallCount.Load() != 0 { + t.Errorf("IMP-IDEM-003: expected 0 PUT calls when overwriteExisting=false, got %d", mock.updateCallCount.Load()) + } +} + +// IMP-IDEM-004: dryRun=true => no POST +func TestApply_IMP_IDEM_004_DryRun_NoPost(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, true, false) // dryRun=true + + _ = r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]string{}) + + if mock.callCount.Load() != 0 { + t.Errorf("IMP-IDEM-004: expected 0 POST calls in dry-run mode, got %d", mock.callCount.Load()) + } +} + +// IMP-IDEM-005: dryRun=true => no PUT (even with overwriteExisting=true) +func TestApply_IMP_IDEM_005_DryRun_NoPut(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, true, true) // dryRun=true, overwriteExisting=true + + existing := map[string]string{"existing-scan": "existing-id"} + _ = r.Apply(context.Background(), defaultPayload("existing-scan"), defaultSource(), existing) + + if mock.updateCallCount.Load() != 0 { + t.Errorf("IMP-IDEM-005: expected 0 PUT calls in dry-run mode, got %d", mock.updateCallCount.Load()) + } + if mock.callCount.Load() != 0 { + t.Errorf("IMP-IDEM-005: expected 0 POST calls in dry-run mode, got %d", mock.callCount.Load()) + } +} + +// IMP-IDEM-006: dryRun => action="create" still recorded as planned +func TestApply_IMP_IDEM_006_DryRun_PlannedCreateRecorded(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, true, false) // dryRun=true + + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]string{}) + + if action.ActionType != "create" { + t.Errorf("IMP-IDEM-006: dry-run planned action should be 'create', got %q", action.ActionType) + } +} + +// IMP-IDEM-007: dryRun => problems still populated for problematic resources +func TestApply_IMP_IDEM_007_DryRun_ProblemsStillPopulated(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, true, false) // dryRun=true + + existing := map[string]string{"cis-weekly": "existing-id-123"} + action := r.Apply(context.Background(), defaultPayload("cis-weekly"), defaultSource(), existing) + + if action.Problem == nil { + t.Fatal("IMP-IDEM-007: expected Problem to be populated even in dry-run mode") + } + if action.Problem.Category != models.CategoryConflict { + t.Errorf("IMP-IDEM-007: expected conflict problem in dry-run, got %q", action.Problem.Category) + } +} + +// IMP-ERR-001: 429 first 2 times then 200 => 3 total attempts +func TestApply_IMP_ERR_001_Retry429_ThenSuccess(t *testing.T) { + mock := &mockACSClient{ + createResponses: []error{ + &statusError{code: 429}, + &statusError{code: 429}, + nil, // 3rd attempt succeeds + }, + } + r := reconcile.NewReconciler(mock, 5, false, false) + + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]string{}) + + if action.ActionType != "create" { + t.Errorf("IMP-ERR-001: expected action 'create' after retry success, got %q", action.ActionType) + } + if action.Attempts != 3 { + t.Errorf("IMP-ERR-001: expected 3 total attempts, got %d", action.Attempts) + } + if action.Err != nil { + t.Errorf("IMP-ERR-001: expected nil error after eventual success, got %v", action.Err) + } +} + +// IMP-ERR-001: Retry on transient errors 502, 503, 504 +func TestApply_IMP_ERR_001_Retry5xx_ThenSuccess(t *testing.T) { + for _, code := range []int{502, 503, 504} { + code := code + t.Run(fmt.Sprintf("HTTP%d", code), func(t *testing.T) { + mock := &mockACSClient{ + createResponses: []error{ + &statusError{code: code}, + &statusError{code: code}, + nil, // 3rd succeeds + }, + } + r := reconcile.NewReconciler(mock, 5, false, false) + + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]string{}) + + if action.ActionType != "create" { + t.Errorf("IMP-ERR-001: HTTP %d - expected 'create', got %q", code, action.ActionType) + } + if action.Attempts != 3 { + t.Errorf("IMP-ERR-001: HTTP %d - expected 3 attempts, got %d", code, action.Attempts) + } + }) + } +} + +// IMP-ERR-002: 400 => 1 attempt only, action="fail" +func TestApply_IMP_ERR_002_NonTransient400_NoRetry(t *testing.T) { + for _, code := range []int{400, 401, 403, 404} { + code := code + t.Run(fmt.Sprintf("HTTP%d", code), func(t *testing.T) { + mock := &mockACSClient{ + createResponses: []error{ + &statusError{code: code}, + }, + } + r := reconcile.NewReconciler(mock, 5, false, false) + + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]string{}) + + if action.ActionType != "fail" { + t.Errorf("IMP-ERR-002: HTTP %d - expected action 'fail', got %q", code, action.ActionType) + } + if action.Attempts != 1 { + t.Errorf("IMP-ERR-002: HTTP %d - expected exactly 1 attempt (no retry), got %d", code, action.Attempts) + } + if mock.callCount.Load() != 1 { + t.Errorf("IMP-ERR-002: HTTP %d - expected 1 POST call, got %d", code, mock.callCount.Load()) + } + if action.Err == nil { + t.Errorf("IMP-ERR-002: HTTP %d - expected non-nil error", code) + } + }) + } +} + +// --------------------------------------------------------------------------- +// Compile-time check: mockACSClient satisfies models.ACSClient +// This fails to compile if models.ACSClient gains any method not implemented +// by the mock, making interface drift immediately visible. +// --------------------------------------------------------------------------- +var _ models.ACSClient = (*mockACSClient)(nil) + +// --------------------------------------------------------------------------- +// Utility +// --------------------------------------------------------------------------- + +func containsSubstring(s, sub string) bool { + return len(s) >= len(sub) && (s == sub || len(sub) == 0 || + func() bool { + for i := 0; i <= len(s)-len(sub); i++ { + if s[i:i+len(sub)] == sub { + return true + } + } + return false + }()) +} + +// Verify containsSubstring works correctly +var _ = func() bool { + if !containsSubstring("scan already exists in ACS", "already exists") { + panic("containsSubstring broken") + } + return true +}() + +// errorIs is a helper for unwrapping statusError from wrapped errors. +func errorIs(err error, code int) bool { + var se *statusError + return errors.As(err, &se) && se.code == code +} + +// keep errorIs in use +var _ = errorIs + +// IMP-IDEM-008: overwriteExisting=true, name exists => PUT called, action="update" +func TestApply_IMP_IDEM_008_OverwriteExisting_Updates(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, false, true) // overwriteExisting=true + + existing := map[string]string{"cis-weekly": "existing-config-id-789"} + action := r.Apply(context.Background(), defaultPayload("cis-weekly"), defaultSource(), existing) + + if action.ActionType != "update" { + t.Errorf("IMP-IDEM-008: expected action 'update', got %q", action.ActionType) + } + if action.ACSScanConfigID != "existing-config-id-789" { + t.Errorf("IMP-IDEM-008: expected existing ID preserved, got %q", action.ACSScanConfigID) + } + if action.Err != nil { + t.Errorf("IMP-IDEM-008: unexpected error: %v", action.Err) + } + if mock.updateCallCount.Load() != 1 { + t.Errorf("IMP-IDEM-008: expected 1 PUT call, got %d", mock.updateCallCount.Load()) + } + if mock.callCount.Load() != 0 { + t.Errorf("IMP-IDEM-008: expected 0 POST calls, got %d", mock.callCount.Load()) + } +} + +// IMP-IDEM-009: overwriteExisting=true, name not exists => POST called, action="create" +func TestApply_IMP_IDEM_009_OverwriteExisting_Creates(t *testing.T) { + mock := &mockACSClient{} + r := reconcile.NewReconciler(mock, 3, false, true) // overwriteExisting=true + + action := r.Apply(context.Background(), defaultPayload("new-scan"), defaultSource(), map[string]string{}) + + if action.ActionType != "create" { + t.Errorf("IMP-IDEM-009: expected action 'create', got %q", action.ActionType) + } + if action.ACSScanConfigID == "" { + t.Error("IMP-IDEM-009: expected non-empty ACSScanConfigID after create") + } + if action.Err != nil { + t.Errorf("IMP-IDEM-009: unexpected error: %v", action.Err) + } + if mock.callCount.Load() != 1 { + t.Errorf("IMP-IDEM-009: expected 1 POST call, got %d", mock.callCount.Load()) + } + if mock.updateCallCount.Load() != 0 { + t.Errorf("IMP-IDEM-009: expected 0 PUT calls, got %d", mock.updateCallCount.Load()) + } +} diff --git a/scripts/compliance-operator-importer/internal/report/report.go b/scripts/compliance-operator-importer/internal/report/report.go new file mode 100644 index 0000000000000..c5ab9f7e9e645 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/report/report.go @@ -0,0 +1,104 @@ +// Package report assembles the final Report from accumulated run items and writes +// it to disk as indented JSON when --report-json is set. +package report + +import ( + "encoding/json" + "fmt" + "os" + "time" + + "github.com/stackrox/co-acs-importer/internal/models" +) + +// Builder accumulates per-binding ReportItems during a run and produces the +// final Report once all bindings have been processed. +type Builder struct { + cfg *models.Config + items []models.ReportItem +} + +// NewBuilder returns a Builder configured from cfg. +func NewBuilder(cfg *models.Config) *Builder { + return &Builder{cfg: cfg} +} + +// RecordItem appends a single binding outcome to the builder. +func (b *Builder) RecordItem(item models.ReportItem) { + b.items = append(b.items, item) +} + +// Build constructs the final Report from all recorded items and the supplied +// problems list. +// +// IMP-CLI-021: sets meta.mode based on cfg.OverwriteExisting, meta.timestamp to current UTC +// RFC3339, meta.dryRun from cfg, meta.namespaceScope from cfg. +// IMP-CLI-021: computes counts from items actions. +func (b *Builder) Build(problems []models.Problem) models.Report { + mode := "create-only" + if b.cfg.OverwriteExisting { + mode = "create-or-update" + } + + meta := models.ReportMeta{ + Timestamp: time.Now().UTC().Format(time.RFC3339), + DryRun: b.cfg.DryRun, + NamespaceScope: namespaceScope(b.cfg), + Mode: mode, + } + + counts := models.ReportCounts{ + Discovered: len(b.items), + } + for _, it := range b.items { + switch it.Action { + case "create": + counts.Create++ + case "update": + counts.Update++ + case "skip": + counts.Skip++ + case "fail": + counts.Failed++ + } + } + + items := b.items + if items == nil { + items = []models.ReportItem{} + } + if problems == nil { + problems = []models.Problem{} + } + + return models.Report{ + Meta: meta, + Counts: counts, + Items: items, + Problems: problems, + } +} + +// WriteJSON writes report as indented JSON to path. +// Returns an error if the file cannot be created or written. +// IMP-CLI-021: output must be valid, parseable JSON. +func (b *Builder) WriteJSON(path string, report models.Report) error { + data, err := json.MarshalIndent(report, "", " ") + if err != nil { + return fmt.Errorf("marshal report to JSON: %w", err) + } + // Append a trailing newline for POSIX text-file compliance. + data = append(data, '\n') + if err := os.WriteFile(path, data, 0o600); err != nil { + return fmt.Errorf("write report JSON to %q: %w", path, err) + } + return nil +} + +// namespaceScope derives the namespaceScope string from cfg. +func namespaceScope(cfg *models.Config) string { + if cfg.COAllNamespaces { + return "all-namespaces" + } + return cfg.CONamespace +} diff --git a/scripts/compliance-operator-importer/internal/report/report_test.go b/scripts/compliance-operator-importer/internal/report/report_test.go new file mode 100644 index 0000000000000..ca8ca12ae39dc --- /dev/null +++ b/scripts/compliance-operator-importer/internal/report/report_test.go @@ -0,0 +1,217 @@ +package report_test + +import ( + "encoding/json" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stackrox/co-acs-importer/internal/models" + "github.com/stackrox/co-acs-importer/internal/report" +) + +// baseConfig returns a minimal Config suitable for most report tests. +func baseConfig() *models.Config { + return &models.Config{ + DryRun: false, + CONamespace: "openshift-compliance", + } +} + +// TestIMP_CLI_021_BuildSetsModeTocreateOnly verifies that Build always sets +// meta.mode to "create-only" regardless of other configuration. +// Requirement: IMP-CLI-021. +func TestIMP_CLI_021_BuildSetsModeTocreateOnly(t *testing.T) { + b := report.NewBuilder(baseConfig()) + r := b.Build(nil) + if r.Meta.Mode != "create-only" { + t.Errorf("meta.mode = %q; want %q", r.Meta.Mode, "create-only") + } +} + +// TestIMP_CLI_021_BuildCountsFromItemActions verifies that Build correctly derives +// discovered/create/skip/failed counts from the recorded items. +// Requirement: IMP-CLI-021. +func TestIMP_CLI_021_BuildCountsFromItemActions(t *testing.T) { + cfg := baseConfig() + b := report.NewBuilder(cfg) + + b.RecordItem(models.ReportItem{Action: "create"}) + b.RecordItem(models.ReportItem{Action: "create"}) + b.RecordItem(models.ReportItem{Action: "skip"}) + b.RecordItem(models.ReportItem{Action: "fail"}) + + r := b.Build(nil) + + if r.Counts.Discovered != 4 { + t.Errorf("counts.discovered = %d; want 4", r.Counts.Discovered) + } + if r.Counts.Create != 2 { + t.Errorf("counts.create = %d; want 2", r.Counts.Create) + } + if r.Counts.Skip != 1 { + t.Errorf("counts.skip = %d; want 1", r.Counts.Skip) + } + if r.Counts.Failed != 1 { + t.Errorf("counts.failed = %d; want 1", r.Counts.Failed) + } +} + +// TestIMP_CLI_021_BuildMetaNamespaceScopeAllNamespaces verifies that when +// COAllNamespaces is set, meta.namespaceScope is "all-namespaces". +// Requirement: IMP-CLI-021. +func TestIMP_CLI_021_BuildMetaNamespaceScopeAllNamespaces(t *testing.T) { + cfg := &models.Config{ + COAllNamespaces: true, + } + b := report.NewBuilder(cfg) + r := b.Build(nil) + if r.Meta.NamespaceScope != "all-namespaces" { + t.Errorf("meta.namespaceScope = %q; want %q", r.Meta.NamespaceScope, "all-namespaces") + } +} + +// TestIMP_CLI_021_BuildMetaNamespaceScopeSingleNamespace verifies that when +// COAllNamespaces is false, meta.namespaceScope equals cfg.CONamespace. +// Requirement: IMP-CLI-021. +func TestIMP_CLI_021_BuildMetaNamespaceScopeSingleNamespace(t *testing.T) { + cfg := &models.Config{ + CONamespace: "openshift-compliance", + COAllNamespaces: false, + } + b := report.NewBuilder(cfg) + r := b.Build(nil) + if r.Meta.NamespaceScope != "openshift-compliance" { + t.Errorf("meta.namespaceScope = %q; want %q", r.Meta.NamespaceScope, "openshift-compliance") + } +} + +// TestIMP_CLI_021_BuildMetaDryRunReflectsCfg verifies that meta.dryRun mirrors +// the cfg.DryRun field. +// Requirement: IMP-CLI-021. +func TestIMP_CLI_021_BuildMetaDryRunReflectsCfg(t *testing.T) { + for _, dryRun := range []bool{true, false} { + cfg := &models.Config{DryRun: dryRun, CONamespace: "ns"} + b := report.NewBuilder(cfg) + r := b.Build(nil) + if r.Meta.DryRun != dryRun { + t.Errorf("dryRun=%v: meta.dryRun = %v; want %v", dryRun, r.Meta.DryRun, dryRun) + } + } +} + +// TestIMP_CLI_021_BuildTimestampIsRFC3339 verifies that meta.timestamp is a +// non-empty, valid RFC3339 string. +// Requirement: IMP-CLI-021. +func TestIMP_CLI_021_BuildTimestampIsRFC3339(t *testing.T) { + b := report.NewBuilder(baseConfig()) + r := b.Build(nil) + if r.Meta.Timestamp == "" { + t.Fatal("meta.timestamp is empty") + } + // time.Parse with RFC3339 format validates the string. + // We use strings.Contains as a lightweight check; a full parse would need + // importing "time" and would be equally valid. + if !strings.Contains(r.Meta.Timestamp, "T") || !strings.Contains(r.Meta.Timestamp, "Z") { + t.Errorf("meta.timestamp %q does not look like UTC RFC3339", r.Meta.Timestamp) + } +} + +// TestIMP_CLI_021_WriteJSONProducesValidJSON verifies that WriteJSON writes +// parseable JSON to disk. +// Requirement: IMP-CLI-021. +func TestIMP_CLI_021_WriteJSONProducesValidJSON(t *testing.T) { + cfg := baseConfig() + b := report.NewBuilder(cfg) + b.RecordItem(models.ReportItem{Action: "create", Reason: "created successfully"}) + + r := b.Build(nil) + + dir := t.TempDir() + path := filepath.Join(dir, "report.json") + if err := b.WriteJSON(path, r); err != nil { + t.Fatalf("WriteJSON returned error: %v", err) + } + + data, err := os.ReadFile(path) + if err != nil { + t.Fatalf("reading written report: %v", err) + } + + var parsed models.Report + if err := json.Unmarshal(data, &parsed); err != nil { + t.Fatalf("written JSON is not parseable: %v\ncontent:\n%s", err, string(data)) + } + + if parsed.Meta.Mode != "create-only" { + t.Errorf("parsed meta.mode = %q; want %q", parsed.Meta.Mode, "create-only") + } + if parsed.Counts.Discovered != 1 { + t.Errorf("parsed counts.discovered = %d; want 1", parsed.Counts.Discovered) + } +} + +// TestIMP_CLI_022_ProblemsInReportMatchInput verifies that problems passed to +// Build() appear unchanged in the report's Problems field. +// Requirement: IMP-CLI-022. +func TestIMP_CLI_022_ProblemsInReportMatchInput(t *testing.T) { + cfg := baseConfig() + b := report.NewBuilder(cfg) + + probs := []models.Problem{ + { + Severity: models.SeverityError, + Category: models.CategoryAPI, + ResourceRef: "ns/binding-a", + Description: "ACS API returned 503", + FixHint: "Check ACS endpoint health and retry.", + Skipped: true, + }, + { + Severity: models.SeverityWarning, + Category: models.CategoryConflict, + ResourceRef: "ns/binding-b", + Description: "Scan config already exists", + FixHint: "Delete the existing ACS config and re-run.", + Skipped: true, + }, + } + + r := b.Build(probs) + + if len(r.Problems) != 2 { + t.Fatalf("expected 2 problems in report, got %d", len(r.Problems)) + } + for i, want := range probs { + got := r.Problems[i] + if got != want { + t.Errorf("problem[%d] mismatch: got %+v, want %+v", i, got, want) + } + } +} + +// TestIMP_CLI_021_WriteJSONErrorOnBadPath verifies WriteJSON returns an error +// when the target directory does not exist. +func TestIMP_CLI_021_WriteJSONErrorOnBadPath(t *testing.T) { + b := report.NewBuilder(baseConfig()) + r := b.Build(nil) + err := b.WriteJSON("/nonexistent/dir/report.json", r) + if err == nil { + t.Error("expected error writing to non-existent path, got nil") + } +} + +// TestIMP_CLI_021_BuildEmptyItemsProducesNonNilSlices verifies that Build +// returns non-nil Items and Problems slices even when nothing was recorded. +// This ensures JSON output is "items": [] not "items": null. +func TestIMP_CLI_021_BuildEmptyItemsProducesNonNilSlices(t *testing.T) { + b := report.NewBuilder(baseConfig()) + r := b.Build(nil) + if r.Items == nil { + t.Error("Items is nil; want empty non-nil slice so JSON marshals as []") + } + if r.Problems == nil { + t.Error("Problems is nil; want empty non-nil slice so JSON marshals as []") + } +} diff --git a/scripts/compliance-operator-importer/internal/run/cluster_source.go b/scripts/compliance-operator-importer/internal/run/cluster_source.go new file mode 100644 index 0000000000000..1796aaca501a1 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/run/cluster_source.go @@ -0,0 +1,172 @@ +package run + +import ( + "context" + "errors" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/stackrox/co-acs-importer/internal/cofetch" + "github.com/stackrox/co-acs-importer/internal/discover" + "github.com/stackrox/co-acs-importer/internal/models" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +// ClusterSource represents a single source cluster with its CO client and ACS cluster ID. +type ClusterSource struct { + Label string // context name, for logging + COClient cofetch.COClient + ACSClusterID string +} + +// contextRef pairs a kubeconfig file with one of its contexts. +type contextRef struct { + Context string + KubeconfigFile string +} + +// BuildClusterSources creates ClusterSource entries by loading each kubeconfig +// file independently (no merging). If cfg.Contexts is non-empty, only matching +// contexts are used. +func BuildClusterSources(ctx context.Context, cfg *models.Config, acsClient models.ACSClient) ([]ClusterSource, error) { + allRefs, err := listContextRefs() + if err != nil { + return nil, err + } + + refs := allRefs + if len(cfg.Contexts) > 0 { + refs = filterRefs(allRefs, cfg.Contexts) + if len(refs) == 0 { + return nil, fmt.Errorf("none of the requested --context values match available contexts %v", contextNames(allRefs)) + } + } + + var sources []ClusterSource + for _, ref := range refs { + restCfg, err := restConfigForRef(ref) + if err != nil { + return nil, fmt.Errorf("build rest config for context %q: %w", ref.Context, err) + } + + coClient, err := cofetch.NewClientFromRestConfig(restCfg, cfg.CONamespace, cfg.COAllNamespaces) + if err != nil { + return nil, fmt.Errorf("create CO client for context %q: %w", ref.Context, err) + } + + dynClient, err := dynamic.NewForConfig(restCfg) + if err != nil { + return nil, fmt.Errorf("build dynamic client for context %q: %w", ref.Context, err) + } + + acsClusterID, err := discover.DiscoverClusterID(ctx, discover.NewK8sDiscoveryClient(dynClient), acsClient) + if err != nil { + return nil, fmt.Errorf("discover cluster ID for context %q: %w", ref.Context, err) + } + + sources = append(sources, ClusterSource{ + Label: ref.Context, + COClient: coClient, + ACSClusterID: acsClusterID, + }) + } + + if len(sources) == 0 { + return nil, errors.New("no contexts found in kubeconfig") + } + return sources, nil +} + +// filterRefs returns refs whose context name appears in the wanted set. +func filterRefs(all []contextRef, wanted []string) []contextRef { + set := make(map[string]bool, len(wanted)) + for _, w := range wanted { + set[w] = true + } + var result []contextRef + for _, r := range all { + if set[r.Context] { + result = append(result, r) + } + } + return result +} + +func contextNames(refs []contextRef) []string { + names := make([]string, len(refs)) + for i, r := range refs { + names[i] = r.Context + } + return names +} + +// listContextRefs enumerates contexts from each kubeconfig file independently. +// Each file is loaded in isolation so that user/cluster entries with the same +// name in different files don't collide. +func listContextRefs() ([]contextRef, error) { + files := kubeconfigFiles() + if len(files) == 0 { + return nil, errors.New("no kubeconfig files found (check KUBECONFIG or ~/.kube/config)") + } + + var refs []contextRef + for _, file := range files { + cfg, err := clientcmd.LoadFromFile(file) + if err != nil { + return nil, fmt.Errorf("load kubeconfig %q: %w", file, err) + } + for ctxName := range cfg.Contexts { + refs = append(refs, contextRef{Context: ctxName, KubeconfigFile: file}) + } + } + + if len(refs) == 0 { + return nil, errors.New("no contexts found in kubeconfig files") + } + return refs, nil +} + +// kubeconfigFiles returns the list of kubeconfig file paths from the KUBECONFIG +// env var, or falls back to ~/.kube/config. +func kubeconfigFiles() []string { + env := os.Getenv(clientcmd.RecommendedConfigPathEnvVar) + if env == "" { + home, err := os.UserHomeDir() + if err != nil { + return nil + } + defaultPath := filepath.Join(home, clientcmd.RecommendedHomeDir, clientcmd.RecommendedFileName) + if _, err := os.Stat(defaultPath); err == nil { + return []string{defaultPath} + } + return nil + } + + parts := filepath.SplitList(env) + var files []string + for _, p := range parts { + p = strings.TrimSpace(p) + if p == "" { + continue + } + if _, err := os.Stat(p); err == nil { + files = append(files, p) + } + } + return files +} + +// restConfigForRef builds a rest.Config from a specific kubeconfig file and context, +// without merging with other kubeconfig files. +func restConfigForRef(ref contextRef) (*rest.Config, error) { + loadingRules := &clientcmd.ClientConfigLoadingRules{ + ExplicitPath: ref.KubeconfigFile, + } + overrides := &clientcmd.ConfigOverrides{CurrentContext: ref.Context} + kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides) + return kubeConfig.ClientConfig() +} diff --git a/scripts/compliance-operator-importer/internal/run/cluster_source_test.go b/scripts/compliance-operator-importer/internal/run/cluster_source_test.go new file mode 100644 index 0000000000000..4d591d7fad107 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/run/cluster_source_test.go @@ -0,0 +1,183 @@ +package run + +import ( + "os" + "path/filepath" + "testing" + + "k8s.io/client-go/tools/clientcmd" +) + +// writeMinimalKubeconfig writes a kubeconfig with the given contexts to a file. +// Each context gets a unique cluster and user entry within the file. +func writeMinimalKubeconfig(t *testing.T, dir, filename string, contextNames []string) string { + t.Helper() + + var clusters, contexts, users string + for _, name := range contextNames { + clusterName := "cluster-" + name + userName := "user-" + name + clusters += ` +- cluster: + server: https://` + name + `.example.com:6443 + name: ` + clusterName + contexts += ` +- context: + cluster: ` + clusterName + ` + user: ` + userName + ` + name: ` + name + users += ` +- name: ` + userName + ` + user: + token: token-` + filename + `-` + name + } + + content := `apiVersion: v1 +kind: Config +clusters:` + clusters + ` +contexts:` + contexts + ` +current-context: ` + contextNames[0] + ` +users:` + users + ` +` + path := filepath.Join(dir, filename) + if err := os.WriteFile(path, []byte(content), 0600); err != nil { + t.Fatalf("write kubeconfig %s: %v", path, err) + } + return path +} + +// TestIMP_CLI_003_SingleFileAllContexts verifies that all contexts from a +// single kubeconfig file are discovered. +func TestIMP_CLI_003_SingleFileAllContexts(t *testing.T) { + dir := t.TempDir() + path := writeMinimalKubeconfig(t, dir, "config", []string{"ctx-a", "ctx-b"}) + t.Setenv(clientcmd.RecommendedConfigPathEnvVar, path) + + refs, err := listContextRefs() + if err != nil { + t.Fatalf("listContextRefs: %v", err) + } + if len(refs) != 2 { + t.Fatalf("expected 2 refs, got %d", len(refs)) + } + names := contextNames(refs) + for _, want := range []string{"ctx-a", "ctx-b"} { + found := false + for _, n := range names { + if n == want { + found = true + } + } + if !found { + t.Errorf("expected context %q in %v", want, names) + } + } +} + +// TestIMP_CLI_003_MultiFileUniqueContexts verifies that contexts from multiple +// kubeconfig files are all discovered when names are unique. +func TestIMP_CLI_003_MultiFileUniqueContexts(t *testing.T) { + dir := t.TempDir() + path1 := writeMinimalKubeconfig(t, dir, "config-a", []string{"ctx-a"}) + path2 := writeMinimalKubeconfig(t, dir, "config-b", []string{"ctx-b"}) + t.Setenv(clientcmd.RecommendedConfigPathEnvVar, path1+string(os.PathListSeparator)+path2) + + refs, err := listContextRefs() + if err != nil { + t.Fatalf("listContextRefs: %v", err) + } + if len(refs) != 2 { + t.Fatalf("expected 2 refs, got %d", len(refs)) + } +} + +// TestIMP_CLI_003_MultiFileDuplicateContextsBothProcessed verifies that when +// the same context name appears in multiple files, both are returned. +func TestIMP_CLI_003_MultiFileDuplicateContextsBothProcessed(t *testing.T) { + dir := t.TempDir() + path1 := writeMinimalKubeconfig(t, dir, "config", []string{"admin"}) + path2 := writeMinimalKubeconfig(t, dir, "config-secured-cluster", []string{"admin"}) + t.Setenv(clientcmd.RecommendedConfigPathEnvVar, path1+string(os.PathListSeparator)+path2) + + refs, err := listContextRefs() + if err != nil { + t.Fatalf("listContextRefs: %v", err) + } + if len(refs) != 2 { + t.Fatalf("expected 2 refs (one per file), got %d", len(refs)) + } + if refs[0].KubeconfigFile == refs[1].KubeconfigFile { + t.Error("expected refs from different files") + } +} + +// TestIMP_CLI_003_PerFileIsolation verifies that each file is loaded +// independently: a user named "user-admin" in file A gets its own credentials, +// not file B's. +func TestIMP_CLI_003_PerFileIsolation(t *testing.T) { + dir := t.TempDir() + path1 := writeMinimalKubeconfig(t, dir, "config", []string{"admin"}) + path2 := writeMinimalKubeconfig(t, dir, "config-cluster-2", []string{"admin"}) + t.Setenv(clientcmd.RecommendedConfigPathEnvVar, path1+string(os.PathListSeparator)+path2) + + refs, err := listContextRefs() + if err != nil { + t.Fatalf("listContextRefs: %v", err) + } + + // Build rest.Config for each ref and verify they use their own file's token. + for _, ref := range refs { + cfg, err := restConfigForRef(ref) + if err != nil { + t.Fatalf("restConfigForRef(%s from %s): %v", ref.Context, ref.KubeconfigFile, err) + } + expectedToken := "token-" + filepath.Base(ref.KubeconfigFile) + "-admin" + if cfg.BearerToken != expectedToken { + t.Errorf("ref from %s: expected token %q, got %q (credential isolation broken)", + filepath.Base(ref.KubeconfigFile), expectedToken, cfg.BearerToken) + } + } +} + +// TestIMP_CLI_003_FilterByContextName verifies that --context filtering matches +// context names across all files. +func TestIMP_CLI_003_FilterByContextName(t *testing.T) { + dir := t.TempDir() + path1 := writeMinimalKubeconfig(t, dir, "config", []string{"admin", "staging"}) + path2 := writeMinimalKubeconfig(t, dir, "config-cluster-2", []string{"admin"}) + t.Setenv(clientcmd.RecommendedConfigPathEnvVar, path1+string(os.PathListSeparator)+path2) + + refs, err := listContextRefs() + if err != nil { + t.Fatalf("listContextRefs: %v", err) + } + + // Filter by "admin" — should match both files. + filtered := filterRefs(refs, []string{"admin"}) + if len(filtered) != 2 { + t.Errorf("filter by 'admin': expected 2 matches, got %d", len(filtered)) + } + + // Filter by "staging" — should match one. + filtered = filterRefs(refs, []string{"staging"}) + if len(filtered) != 1 { + t.Errorf("filter by 'staging': expected 1 match, got %d", len(filtered)) + } + + // Filter by nonexistent — should match none. + filtered = filterRefs(refs, []string{"nonexistent"}) + if len(filtered) != 0 { + t.Errorf("filter by 'nonexistent': expected 0 matches, got %d", len(filtered)) + } +} + +// TestIMP_CLI_003_NoKubeconfigFiles verifies clear error when no files exist. +func TestIMP_CLI_003_NoKubeconfigFiles(t *testing.T) { + t.Setenv(clientcmd.RecommendedConfigPathEnvVar, "/nonexistent/path") + t.Setenv("HOME", t.TempDir()) + + _, err := listContextRefs() + if err == nil { + t.Fatal("expected error when no kubeconfig files exist") + } +} diff --git a/scripts/compliance-operator-importer/internal/run/multi_cluster.go b/scripts/compliance-operator-importer/internal/run/multi_cluster.go new file mode 100644 index 0000000000000..f3bc0455f6bd6 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/run/multi_cluster.go @@ -0,0 +1,246 @@ +package run + +import ( + "context" + "fmt" + + "github.com/stackrox/co-acs-importer/internal/adopt" + "github.com/stackrox/co-acs-importer/internal/cofetch" + "github.com/stackrox/co-acs-importer/internal/mapping" + "github.com/stackrox/co-acs-importer/internal/merge" + "github.com/stackrox/co-acs-importer/internal/models" + "github.com/stackrox/co-acs-importer/internal/problems" + "github.com/stackrox/co-acs-importer/internal/reconcile" + "github.com/stackrox/co-acs-importer/internal/report" +) + +// RunMultiCluster executes the importer in multi-cluster mode. +// +// Steps: +// 1. List existing ACS scan configs to build the existingNames set. +// 2. For each cluster source: +// a. List ScanSettingBindings. +// b. Map each SSB to an ACS payload, using the cluster's ACS ID. +// 3. Merge SSBs across clusters by name. +// 4. Reconcile merged payloads against ACS. +// 5. Build and write report. +// 6. Print console summary. +// 7. Return exit code. +func (r *Runner) RunMultiCluster(ctx context.Context, sources []ClusterSource) int { + collector := problems.NewCollector() + builder := report.NewBuilder(r.cfg) + + // Step 1: list existing ACS scan configs. + r.status.Stage("Inventory", "listing existing ACS scan configurations") + summaries, err := r.acsClient.ListScanConfigurations(ctx) + if err != nil { + r.status.Failf("failed to list ACS scan configurations: %v", err) + return ExitFatalError + } + existingNames := make(map[string]string, len(summaries)) + for _, s := range summaries { + existingNames[s.ScanName] = s.ID + } + r.status.OKf("found %d existing scan configurations", len(summaries)) + + // ssbClusterInfo tracks per-SSB per-cluster metadata needed for adoption. + type ssbClusterInfo struct { + namespace string + oldSettingRef string + clusterLabel string + coClient cofetch.COClient + preExistingSS map[string]bool + } + // Key: SSB name, value: list of cluster infos (one per cluster that has the SSB). + ssbAdoptionMap := make(map[string][]ssbClusterInfo) + + // Step 2: collect SSBs from all clusters and map them. + clusterSSBs := make(map[string][]merge.MappedSSB) + + for _, source := range sources { + r.status.Stagef("Scan", "cluster %s (ACS ID: %s)", source.Label, source.ACSClusterID) + + bindings, err := source.COClient.ListScanSettingBindings(ctx) + if err != nil { + r.status.Warnf("failed to list ScanSettingBindings from %s: %v", source.Label, err) + collector.Add(models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryInput, + ResourceRef: source.Label, + Description: fmt.Sprintf("Failed to list ScanSettingBindings from cluster %q: %v", source.Label, err), + FixHint: "Check cluster connectivity and permissions.", + Skipped: true, + }) + continue + } + + r.status.OKf("found %d ScanSettingBindings", len(bindings)) + + // Snapshot which ScanSettings (named after SSBs) already exist on + // this cluster before reconciliation, for the adoption pre-existence check. + preExistingSS := make(map[string]bool) + for _, b := range bindings { + if _, err := source.COClient.GetScanSetting(ctx, b.Namespace, b.Name); err == nil { + preExistingSS[b.Name] = true + } + } + + for _, binding := range bindings { + // Fetch the ScanSetting. + ss, err := source.COClient.GetScanSetting(ctx, binding.Namespace, binding.ScanSettingName) + if err != nil { + r.status.Warnf("%s → ScanSetting %q not found on cluster %s: %v", binding.Name, binding.ScanSettingName, source.Label, err) + collector.Add(models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryInput, + ResourceRef: fmt.Sprintf("%s:%s/%s", source.Label, binding.Namespace, binding.Name), + Description: fmt.Sprintf("ScanSetting %q referenced by binding %q in cluster %q could not be fetched: %v", binding.ScanSettingName, binding.Name, source.Label, err), + FixHint: fmt.Sprintf("Ensure ScanSetting %q exists in namespace %q on cluster %q.", binding.ScanSettingName, binding.Namespace, source.Label), + Skipped: true, + }) + continue + } + + // Map the binding to an ACS payload. + // Create a temporary config with the cluster ID for this source. + tempCfg := *r.cfg + tempCfg.ACSClusterID = source.ACSClusterID + + result := mapping.MapBinding(binding, ss, &tempCfg) + if result.Problem != nil { + r.status.Warnf("%s → mapping error: %s", binding.Name, result.Problem.Description) + collector.Add(*result.Problem) + continue + } + + // Track metadata for adoption. + ssbAdoptionMap[binding.Name] = append(ssbAdoptionMap[binding.Name], ssbClusterInfo{ + namespace: binding.Namespace, + oldSettingRef: binding.ScanSettingName, + clusterLabel: source.Label, + coClient: source.COClient, + preExistingSS: preExistingSS, + }) + + // Add to the cluster's SSB list for merging. + clusterSSBs[source.ACSClusterID] = append(clusterSSBs[source.ACSClusterID], merge.MappedSSB{ + Name: binding.Name, + Profiles: extractProfileNames(binding), + Payload: *result.Payload, + }) + } + } + + // Step 3: merge SSBs across clusters. + r.status.Stage("Merge", "combining ScanSettingBindings across clusters") + mergeResult := merge.MergeSSBs(clusterSSBs) + + for _, problem := range mergeResult.Problems { + collector.Add(problem) + r.status.Warnf("%s: %s", problem.ResourceRef, problem.Description) + } + + r.status.OKf("merged into %d unique scan configurations", len(mergeResult.Merged)) + + // Step 4: reconcile merged payloads. + r.status.Stage("Reconcile", "applying scan configurations to ACS") + maxRetries := r.cfg.MaxRetries + if maxRetries < 1 { + maxRetries = 1 + } + rec := reconcile.NewReconciler(r.acsClient, maxRetries, r.cfg.DryRun, r.cfg.OverwriteExisting) + + var adoptRequests []adopt.Request + + for _, merged := range mergeResult.Merged { + source := models.ReportItemSource{ + BindingName: merged.Name, + // For multi-cluster, namespace and scanSettingName are per-cluster, so we leave them generic. + Namespace: "multi-cluster", + ScanSettingName: "merged", + } + + action := rec.Apply(ctx, merged.Payload, source, existingNames) + + switch action.ActionType { + case "create": + r.status.OKf("%s → created (%d clusters)", merged.Name, len(merged.Payload.Clusters)) + case "update": + r.status.OKf("%s → updated (%d clusters)", merged.Name, len(merged.Payload.Clusters)) + case "skip": + r.status.Detailf("%s → skipped (already exists)", merged.Name) + case "fail": + if action.Err != nil { + r.status.Failf("%s → %s", merged.Name, action.Err) + } else { + r.status.Failf("%s → %s", merged.Name, action.Reason) + } + } + + item := models.ReportItem{ + Source: action.Source, + Action: action.ActionType, + Reason: action.Reason, + Attempts: action.Attempts, + ACSScanConfigID: action.ACSScanConfigID, + } + if action.Err != nil { + item.Error = action.Err.Error() + } + builder.RecordItem(item) + + if action.Problem != nil { + collector.Add(*action.Problem) + } + + // Collect adoption requests for successfully created scan configs. + if action.ActionType == "create" && !r.cfg.DryRun { + for _, info := range ssbAdoptionMap[merged.Name] { + adoptRequests = append(adoptRequests, adopt.Request{ + SSBName: merged.Name, + SSBNamespace: info.namespace, + OldSettingRef: info.oldSettingRef, + ClusterLabel: info.clusterLabel, + COClient: info.coClient, + PreExistingScanSettings: info.preExistingSS, + }) + } + } + } + + // Step 4b: adopt SSBs whose scan configs were just created. + if len(adoptRequests) > 0 { + r.runAdoption(ctx, adoptRequests) + } + + // Step 5: build and write report. + finalReport := builder.Build(collector.All()) + + if r.cfg.ReportJSON != "" { + r.status.Stage("Report", "writing JSON report") + if err := builder.WriteJSON(r.cfg.ReportJSON, finalReport); err != nil { + r.status.Warnf("failed to write JSON report to %q: %v", r.cfg.ReportJSON, err) + } else { + r.status.OKf("report written to %s", r.cfg.ReportJSON) + } + } + + // Step 6: print console summary. + r.printf("\n") + r.printSummary(finalReport) + + // Step 7: determine exit code. + if finalReport.Counts.Failed > 0 || collector.HasErrors() { + return ExitPartialError + } + return ExitSuccess +} + +// extractProfileNames extracts profile names from a binding. +func extractProfileNames(binding cofetch.ScanSettingBinding) []string { + var names []string + for _, p := range binding.Profiles { + names = append(names, p.Name) + } + return names +} diff --git a/scripts/compliance-operator-importer/internal/run/run.go b/scripts/compliance-operator-importer/internal/run/run.go new file mode 100644 index 0000000000000..a32182007b630 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/run/run.go @@ -0,0 +1,282 @@ +// Package run orchestrates a full importer execution: CO discovery, ACS +// reconciliation, problem collection, report generation, and exit-code +// determination. +package run + +import ( + "context" + "fmt" + "io" + "os" + + "github.com/stackrox/co-acs-importer/internal/adopt" + "github.com/stackrox/co-acs-importer/internal/cofetch" + "github.com/stackrox/co-acs-importer/internal/mapping" + "github.com/stackrox/co-acs-importer/internal/models" + "github.com/stackrox/co-acs-importer/internal/problems" + "github.com/stackrox/co-acs-importer/internal/reconcile" + "github.com/stackrox/co-acs-importer/internal/report" + "github.com/stackrox/co-acs-importer/internal/status" +) + +// Exit code constants (IMP-CLI-017..019, IMP-ERR-003). +const ( + ExitSuccess = 0 // all bindings processed without failures + ExitFatalError = 1 // preflight/config failure; no import attempted + ExitPartialError = 2 // at least one binding failed +) + +// Runner orchestrates the full import run. +type Runner struct { + cfg *models.Config + acsClient models.ACSClient + coClient cofetch.COClient + out io.Writer // injectable; defaults to os.Stdout + status *status.Printer // stage-by-stage progress output +} + +// NewRunner creates a Runner ready to execute, writing console output to os.Stdout. +func NewRunner(cfg *models.Config, acsClient models.ACSClient, coClient cofetch.COClient) *Runner { + return &Runner{ + cfg: cfg, + acsClient: acsClient, + coClient: coClient, + out: os.Stdout, + status: status.New(), + } +} + +// WithOutput returns a shallow copy of the Runner writing console output to w. +// Intended for tests that need to capture or suppress printed output. +func (r *Runner) WithOutput(w io.Writer) *Runner { + cp := *r + cp.out = w + cp.status = status.NewWithWriter(w) + return &cp +} + +// printf is a convenience wrapper so callers don't need to handle format errors. +func (r *Runner) printf(format string, args ...any) { + fmt.Fprintf(r.out, format, args...) //nolint:errcheck // best-effort console output +} + +// Run executes the full import and returns the appropriate exit code. +// +// Execution steps: +// 1. List existing ACS scan config names to build the existingNames set. +// 2. List ScanSettingBindings from the CO source cluster. +// 3. For each binding: fetch its ScanSetting, build the ACS payload, reconcile. +// 4. Collect all problems and build the final Report. +// 5. Optionally write the JSON report to --report-json path. +// 6. Print the console summary (IMP-CLI-020). +// 7. Return exit code 0, 1, or 2 (IMP-CLI-017..019, IMP-ERR-003). +func (r *Runner) Run(ctx context.Context) int { + collector := problems.NewCollector() + builder := report.NewBuilder(r.cfg) + + // Step 1: list existing ACS scan configs to populate the deduplication set. + r.status.Stage("Inventory", "listing existing ACS scan configurations") + summaries, err := r.acsClient.ListScanConfigurations(ctx) + if err != nil { + r.status.Failf("failed to list ACS scan configurations: %v", err) + return ExitFatalError + } + existingNames := make(map[string]string, len(summaries)) + for _, s := range summaries { + existingNames[s.ScanName] = s.ID + } + r.status.OKf("found %d existing scan configurations", len(summaries)) + + // Step 2: discover CO ScanSettingBindings. + r.status.Stage("Scan", "listing ScanSettingBindings from cluster") + bindings, err := r.coClient.ListScanSettingBindings(ctx) + if err != nil { + r.status.Failf("failed to list ScanSettingBindings: %v", err) + return ExitFatalError + } + r.status.OKf("found %d ScanSettingBindings", len(bindings)) + + // maxRetries defaults to 1 (single attempt) when cfg.MaxRetries is zero. + maxRetries := r.cfg.MaxRetries + if maxRetries < 1 { + maxRetries = 1 + } + rec := reconcile.NewReconciler(r.acsClient, maxRetries, r.cfg.DryRun, r.cfg.OverwriteExisting) + + // Snapshot which ScanSettings (named after SSBs) already exist on the + // cluster before reconciliation. Used during adoption to avoid patching + // an SSB onto a pre-existing ScanSetting that ACS doesn't control. + preExistingSS := make(map[string]bool) + for _, binding := range bindings { + if _, err := r.coClient.GetScanSetting(ctx, binding.Namespace, binding.Name); err == nil { + preExistingSS[binding.Name] = true + } + } + + // Step 3: process each binding independently. + r.status.Stage("Reconcile", "applying scan configurations to ACS") + var adoptRequests []adopt.Request + for _, binding := range bindings { + action := r.processBinding(ctx, binding, existingNames, rec, collector, builder) + if action == "create" && !r.cfg.DryRun { + adoptRequests = append(adoptRequests, adopt.Request{ + SSBName: binding.Name, + SSBNamespace: binding.Namespace, + OldSettingRef: binding.ScanSettingName, + ClusterLabel: "default", + COClient: r.coClient, + PreExistingScanSettings: preExistingSS, + }) + } + } + + // Step 3b: adopt SSBs whose scan configs were just created. + if len(adoptRequests) > 0 { + r.runAdoption(ctx, adoptRequests) + } + + // Step 4: build the final report. + finalReport := builder.Build(collector.All()) + + // Step 5: write JSON report when requested. + if r.cfg.ReportJSON != "" { + r.status.Stage("Report", "writing JSON report") + if err := builder.WriteJSON(r.cfg.ReportJSON, finalReport); err != nil { + r.status.Warnf("failed to write JSON report to %q: %v", r.cfg.ReportJSON, err) + } else { + r.status.OKf("report written to %s", r.cfg.ReportJSON) + } + } + + // Step 6: print console summary. + r.printf("\n") + r.printSummary(finalReport) + + // Step 7: determine exit code. + if finalReport.Counts.Failed > 0 || collector.HasErrors() { + return ExitPartialError + } + return ExitSuccess +} + +// processBinding handles a single ScanSettingBinding: fetches its ScanSetting, +// maps it to an ACS payload, and calls the reconciler. All failures are recorded +// as problems and do not abort processing of remaining bindings. +// Returns the action type ("create", "update", "skip", "fail", or "" on early return). +func (r *Runner) processBinding( + ctx context.Context, + binding cofetch.ScanSettingBinding, + existingNames map[string]string, + rec *reconcile.Reconciler, + collector *problems.Collector, + builder *report.Builder, +) string { + // Derive a stable resource reference for problem entries. + resourceRef := fmt.Sprintf("%s/%s", binding.Namespace, binding.Name) + + // Build the source for ReportItem entries. + source := models.ReportItemSource{ + Namespace: binding.Namespace, + BindingName: binding.Name, + ScanSettingName: binding.ScanSettingName, + } + + // Fetch the referenced ScanSetting. + ss, err := r.coClient.GetScanSetting(ctx, binding.Namespace, binding.ScanSettingName) + if err != nil { + r.status.Failf("%s → ScanSetting %q not found", binding.Name, binding.ScanSettingName) + collector.Add(models.Problem{ + Severity: models.SeverityError, + Category: models.CategoryInput, + ResourceRef: resourceRef, + Description: fmt.Sprintf("ScanSetting %q referenced by binding %q could not be fetched: %v", binding.ScanSettingName, binding.Name, err), + FixHint: fmt.Sprintf("Ensure ScanSetting %q exists in namespace %q and the importer service account has read access.", binding.ScanSettingName, binding.Namespace), + Skipped: true, + }) + builder.RecordItem(models.ReportItem{ + Source: source, + Action: "fail", + Reason: "ScanSetting not found", + Error: err.Error(), + }) + return "" + } + + // Map the CO resources to an ACS create payload. + result := mapping.MapBinding(binding, ss, r.cfg) + if result.Problem != nil { + r.status.Failf("%s → mapping error: %s", binding.Name, result.Problem.Description) + collector.Add(*result.Problem) + builder.RecordItem(models.ReportItem{ + Source: source, + Action: "fail", + Reason: "mapping error", + Error: result.Problem.Description, + }) + return "" + } + + // Reconcile: create, update, or skip. + action := rec.Apply(ctx, *result.Payload, source, existingNames) + + switch action.ActionType { + case "create": + r.status.OKf("%s → created", binding.Name) + case "update": + r.status.OKf("%s → updated", binding.Name) + case "skip": + r.status.Detailf("%s → skipped (already exists)", binding.Name) + case "fail": + if action.Err != nil { + r.status.Failf("%s → %s", binding.Name, action.Err) + } else { + r.status.Failf("%s → %s", binding.Name, action.Reason) + } + } + + item := models.ReportItem{ + Source: action.Source, + Action: action.ActionType, + Reason: action.Reason, + Attempts: action.Attempts, + ACSScanConfigID: action.ACSScanConfigID, + } + if action.Err != nil { + item.Error = action.Err.Error() + } + builder.RecordItem(item) + + if action.Problem != nil { + collector.Add(*action.Problem) + } + return action.ActionType +} + +// runAdoption runs the SSB adoption step for all requests, logging results. +func (r *Runner) runAdoption(ctx context.Context, requests []adopt.Request) { + r.status.Stage("Adopt", "patching SSB settingsRef to ACS-managed ScanSettings") + adopter := adopt.New() + results := adopter.Adopt(ctx, requests) + for _, res := range results { + switch { + case res.Adopted: + r.status.OKf("%s", res.Message) + case res.Skipped: + r.status.Detailf("%s", res.Message) + case res.TimedOut: + r.status.Warnf("%s", res.Message) + case res.Err != nil: + r.status.Warnf("%s", res.Message) + } + } +} + +// printSummary writes the console summary to the configured output. +func (r *Runner) printSummary(rep models.Report) { + mode := "live" + if r.cfg.DryRun { + mode = "dry-run" + } + r.status.Stagef("Done", "%s | discovered: %d, created: %d, updated: %d, skipped: %d, failed: %d", + mode, rep.Counts.Discovered, rep.Counts.Create, rep.Counts.Update, rep.Counts.Skip, rep.Counts.Failed) +} diff --git a/scripts/compliance-operator-importer/internal/run/run_test.go b/scripts/compliance-operator-importer/internal/run/run_test.go new file mode 100644 index 0000000000000..92f96bffe24a2 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/run/run_test.go @@ -0,0 +1,553 @@ +package run_test + +import ( + "bytes" + "context" + "errors" + "fmt" + "strings" + "testing" + + "github.com/stackrox/co-acs-importer/internal/cofetch" + "github.com/stackrox/co-acs-importer/internal/models" + "github.com/stackrox/co-acs-importer/internal/run" +) + +// --------------------------------------------------------------------------- +// Mock: models.ACSClient +// --------------------------------------------------------------------------- + +type mockACSClient struct { + listErr error + listResult []models.ACSConfigSummary + createErr error + createID string + createCalls int +} + +func (m *mockACSClient) Preflight(_ context.Context) error { return nil } + +func (m *mockACSClient) ListScanConfigurations(_ context.Context) ([]models.ACSConfigSummary, error) { + if m.listErr != nil { + return nil, m.listErr + } + return m.listResult, nil +} + +func (m *mockACSClient) CreateScanConfiguration(_ context.Context, _ models.ACSCreatePayload) (string, error) { + m.createCalls++ + if m.createErr != nil { + return "", m.createErr + } + id := m.createID + if id == "" { + id = fmt.Sprintf("new-id-%d", m.createCalls) + } + return id, nil +} + +func (m *mockACSClient) UpdateScanConfiguration(_ context.Context, _ string, _ models.ACSCreatePayload) error { + // For now, this is a no-op in run tests since we focus on create-only mode. + // Update-specific tests are in reconcile_test.go. + return nil +} + +func (m *mockACSClient) ListClusters(_ context.Context) ([]models.ACSClusterInfo, error) { + // Not used in run tests, return empty list + return []models.ACSClusterInfo{}, nil +} + +// Compile-time check: mockACSClient satisfies models.ACSClient. +var _ models.ACSClient = (*mockACSClient)(nil) + +// --------------------------------------------------------------------------- +// Mock: cofetch.COClient +// --------------------------------------------------------------------------- + +type mockCOClient struct { + bindings []cofetch.ScanSettingBinding + listErr error + scanSetting *cofetch.ScanSetting // returned for any name not in scanSettingsByName + settingErr error + // scanSettingsByName provides name-aware lookups. When set, only names + // present in this map (plus the primary scanSetting's own name) return + // a result. When nil, the primary scanSetting is returned for any name + // (legacy behaviour). + scanSettingsByName map[string]*cofetch.ScanSetting +} + +func (m *mockCOClient) ListScanSettingBindings(_ context.Context) ([]cofetch.ScanSettingBinding, error) { + if m.listErr != nil { + return nil, m.listErr + } + return m.bindings, nil +} + +func (m *mockCOClient) GetScanSetting(_ context.Context, _, name string) (*cofetch.ScanSetting, error) { + if m.settingErr != nil { + return nil, m.settingErr + } + if m.scanSettingsByName != nil { + if ss, ok := m.scanSettingsByName[name]; ok { + return ss, nil + } + return nil, fmt.Errorf("ScanSetting %q not found", name) + } + // Legacy: return the primary scan setting for any name. + return m.scanSetting, nil +} + +func (m *mockCOClient) PatchSSBSettingsRef(_ context.Context, _, _, _ string) error { + return nil +} + +// Compile-time check: mockCOClient satisfies cofetch.COClient. +var _ cofetch.COClient = (*mockCOClient)(nil) + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +// httpStatusError lets the reconciler identify transient vs. non-transient codes. +type httpStatusError struct { + code int +} + +func (e *httpStatusError) Error() string { return fmt.Sprintf("HTTP %d", e.code) } +func (e *httpStatusError) StatusCode() int { return e.code } + +// baseConfig returns a valid Config for most tests. +func baseConfig() *models.Config { + return &models.Config{ + ACSEndpoint: "https://acs.example.com", + ACSClusterID: "cluster-a", + CONamespace: "openshift-compliance", + MaxRetries: 1, + } +} + +// goodBinding returns a ScanSettingBinding that maps cleanly to an ACS payload. +func goodBinding(name string) cofetch.ScanSettingBinding { + return cofetch.ScanSettingBinding{ + Namespace: "openshift-compliance", + Name: name, + ScanSettingName: "default-auto-apply", + Profiles: []cofetch.ProfileRef{ + {Name: "ocp4-cis", Kind: "Profile"}, + }, + } +} + +// goodScanSetting returns a ScanSetting with a valid daily cron schedule. +func goodScanSetting() *cofetch.ScanSetting { + return &cofetch.ScanSetting{ + Namespace: "openshift-compliance", + Name: "default-auto-apply", + Schedule: "0 1 * * *", + } +} + +// runWithCapture executes Run and captures all printed output. +func runWithCapture(t *testing.T, cfg *models.Config, acs models.ACSClient, co cofetch.COClient) (int, string) { + t.Helper() + var buf bytes.Buffer + r := run.NewRunner(cfg, acs, co).WithOutput(&buf) + code := r.Run(context.Background()) + return code, buf.String() +} + +// --------------------------------------------------------------------------- +// Tests: exit codes (IMP-CLI-017, IMP-CLI-018, IMP-CLI-019, IMP-ERR-003) +// --------------------------------------------------------------------------- + +// TestIMP_CLI_017_AllSuccessExitZero verifies that when all bindings are +// created successfully the runner returns exit code 0. +// Requirements: IMP-CLI-017, IMP-ERR-003. +func TestIMP_CLI_017_AllSuccessExitZero(t *testing.T) { + acsClient := &mockACSClient{} // no existing configs, create succeeds + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("cis-weekly")}, + scanSetting: goodScanSetting(), + } + + code, _ := runWithCapture(t, baseConfig(), acsClient, coClient) + + if code != run.ExitSuccess { + t.Errorf("IMP-CLI-017: expected exit code %d (success), got %d", run.ExitSuccess, code) + } +} + +// TestIMP_CLI_017_EmptyBindingListExitZero verifies that an empty binding +// list (nothing to import) also produces exit code 0. +// Requirement: IMP-CLI-017. +func TestIMP_CLI_017_EmptyBindingListExitZero(t *testing.T) { + acsClient := &mockACSClient{} + coClient := &mockCOClient{bindings: []cofetch.ScanSettingBinding{}} + + code, _ := runWithCapture(t, baseConfig(), acsClient, coClient) + + if code != run.ExitSuccess { + t.Errorf("IMP-CLI-017: expected exit code %d for empty run, got %d", run.ExitSuccess, code) + } +} + +// TestIMP_CLI_018_ListACSConfigsFatalExitOne verifies that a fatal failure +// when listing ACS scan configurations returns exit code 1. +// Requirements: IMP-CLI-018, IMP-ERR-003. +func TestIMP_CLI_018_ListACSConfigsFatalExitOne(t *testing.T) { + acsClient := &mockACSClient{listErr: errors.New("ACS unreachable")} + coClient := &mockCOClient{} + + code, output := runWithCapture(t, baseConfig(), acsClient, coClient) + + if code != run.ExitFatalError { + t.Errorf("IMP-CLI-018: expected exit code %d (fatal), got %d", run.ExitFatalError, code) + } + if !strings.Contains(output, "✗") { + t.Errorf("IMP-CLI-018: expected failure marker in output, got: %q", output) + } +} + +// TestIMP_CLI_018_ListBindingsFatalExitOne verifies that a fatal failure +// when listing CO ScanSettingBindings returns exit code 1. +// Requirements: IMP-CLI-018, IMP-ERR-003. +func TestIMP_CLI_018_ListBindingsFatalExitOne(t *testing.T) { + acsClient := &mockACSClient{} + coClient := &mockCOClient{listErr: errors.New("k8s unreachable")} + + code, output := runWithCapture(t, baseConfig(), acsClient, coClient) + + if code != run.ExitFatalError { + t.Errorf("IMP-CLI-018: expected exit code %d (fatal), got %d", run.ExitFatalError, code) + } + if !strings.Contains(output, "✗") { + t.Errorf("IMP-CLI-018: expected failure marker in output, got: %q", output) + } +} + +// TestIMP_CLI_019_SomeFailedExitTwo verifies that when at least one binding +// fails the runner returns exit code 2. +// Requirements: IMP-CLI-019, IMP-ERR-003. +func TestIMP_CLI_019_SomeFailedExitTwo(t *testing.T) { + // Inject a non-transient 400 error so the binding fails without retry. + acsClient := &mockACSClient{ + createErr: &httpStatusError{code: 400}, + } + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("cis-weekly")}, + scanSetting: goodScanSetting(), + } + + code, _ := runWithCapture(t, baseConfig(), acsClient, coClient) + + if code != run.ExitPartialError { + t.Errorf("IMP-CLI-019: expected exit code %d (partial), got %d", run.ExitPartialError, code) + } +} + +// TestIMP_CLI_019_MissingScanSettingExitTwo verifies that a missing ScanSetting +// causes a binding-level failure that results in exit code 2. +// Requirements: IMP-CLI-019, IMP-ERR-003. +func TestIMP_CLI_019_MissingScanSettingExitTwo(t *testing.T) { + acsClient := &mockACSClient{} + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("broken")}, + settingErr: errors.New("ScanSetting not found"), + } + + code, _ := runWithCapture(t, baseConfig(), acsClient, coClient) + + if code != run.ExitPartialError { + t.Errorf("IMP-CLI-019: expected exit code %d (partial), got %d", run.ExitPartialError, code) + } +} + +// TestIMP_ERR_003_ExitCodesMapCorrectly exercises all three exit code paths in +// a single test to confirm the mapping is exact. +// Requirement: IMP-ERR-003. +func TestIMP_ERR_003_ExitCodesMapCorrectly(t *testing.T) { + tests := []struct { + name string + wantCode int + acs *mockACSClient + co *mockCOClient + }{ + { + name: "all_successful", + wantCode: run.ExitSuccess, + acs: &mockACSClient{}, + co: &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("ok")}, + scanSetting: goodScanSetting(), + }, + }, + { + name: "fatal_acs_list", + wantCode: run.ExitFatalError, + acs: &mockACSClient{listErr: errors.New("down")}, + co: &mockCOClient{}, + }, + { + name: "partial_binding_failure", + wantCode: run.ExitPartialError, + acs: &mockACSClient{createErr: &httpStatusError{code: 400}}, + co: &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("fail")}, + scanSetting: goodScanSetting(), + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + code, _ := runWithCapture(t, baseConfig(), tc.acs, tc.co) + if code != tc.wantCode { + t.Errorf("IMP-ERR-003: %s: expected exit code %d, got %d", tc.name, tc.wantCode, code) + } + }) + } +} + +// --------------------------------------------------------------------------- +// Tests: console output (IMP-CLI-020) +// --------------------------------------------------------------------------- + +// TestIMP_CLI_020_ConsoleSummaryIncludesAllCounters verifies that the console +// summary contains discovered, created, skipped, and failed counts. +// Requirement: IMP-CLI-020. +func TestIMP_CLI_020_ConsoleSummaryIncludesAllCounters(t *testing.T) { + // Two bindings: one creates, one is skipped because it already exists. + acsClient := &mockACSClient{ + listResult: []models.ACSConfigSummary{ + {ID: "existing-id", ScanName: "existing-scan"}, + }, + } + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{ + goodBinding("new-scan"), + goodBinding("existing-scan"), // will be skipped + }, + scanSetting: goodScanSetting(), + } + + _, output := runWithCapture(t, baseConfig(), acsClient, coClient) + + requiredPhrases := []string{ + "discovered:", + "created:", + "skipped:", + "failed:", + } + for _, phrase := range requiredPhrases { + if !strings.Contains(output, phrase) { + t.Errorf("IMP-CLI-020: output missing %q\nGot:\n%s", phrase, output) + } + } +} + +// TestIMP_CLI_020_DryRunLabelInSummary verifies that the summary includes +// the dry-run indicator. +// Requirement: IMP-CLI-020. +func TestIMP_CLI_020_DryRunLabelInSummary(t *testing.T) { + cfg := baseConfig() + cfg.DryRun = true + + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("cis-weekly")}, + scanSetting: goodScanSetting(), + } + + _, output := runWithCapture(t, cfg, &mockACSClient{}, coClient) + + if !strings.Contains(output, "dry-run") { + t.Errorf("IMP-CLI-020: expected 'dry-run' in output, got:\n%s", output) + } +} + +// TestIMP_CLI_020_NonDryRunLabelInSummary verifies the non-dry-run label. +// Requirement: IMP-CLI-020. +func TestIMP_CLI_020_NonDryRunLabelInSummary(t *testing.T) { + cfg := baseConfig() + cfg.DryRun = false + + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("cis-weekly")}, + scanSetting: goodScanSetting(), + } + + _, output := runWithCapture(t, cfg, &mockACSClient{}, coClient) + + if !strings.Contains(output, "live") { + t.Errorf("IMP-CLI-020: expected 'live' in output, got:\n%s", output) + } +} + +// TestIMP_CLI_020_CorrectCountsInSummary verifies that counts reported in the +// console summary are numerically correct. +// Requirement: IMP-CLI-020. +func TestIMP_CLI_020_CorrectCountsInSummary(t *testing.T) { + // Arrange: 3 bindings discovered, 2 create, 1 skipped (existing). + acsClient := &mockACSClient{ + listResult: []models.ACSConfigSummary{ + {ID: "id-existing", ScanName: "scan-c"}, + }, + } + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{ + goodBinding("scan-a"), + goodBinding("scan-b"), + goodBinding("scan-c"), // exists => skip + }, + scanSetting: goodScanSetting(), + } + + _, output := runWithCapture(t, baseConfig(), acsClient, coClient) + + if !strings.Contains(output, "discovered: 3") { + t.Errorf("IMP-CLI-020: expected 'discovered: 3' in output, got:\n%s", output) + } + if !strings.Contains(output, "created: 2") { + t.Errorf("IMP-CLI-020: expected 'created: 2' in output, got:\n%s", output) + } + if !strings.Contains(output, "skipped: 1") { + t.Errorf("IMP-CLI-020: expected 'skipped: 1' in output, got:\n%s", output) + } + if !strings.Contains(output, "failed: 0") { + t.Errorf("IMP-CLI-020: expected 'failed: 0' in output, got:\n%s", output) + } +} + +// --------------------------------------------------------------------------- +// Tests: API error => problem recorded (IMP-ERR-004) +// --------------------------------------------------------------------------- + +// TestIMP_ERR_004_APIErrorRecordedAsProblem verifies that a non-transient API +// error causes the binding to be skipped and recorded as a problem in the report. +// The report's failed count must reflect the failure. +// Requirements: IMP-ERR-004, IMP-CLI-022. +func TestIMP_ERR_004_APIErrorRecordedAsProblem(t *testing.T) { + acsClient := &mockACSClient{ + createErr: &httpStatusError{code: 400}, + } + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("bad-scan")}, + scanSetting: goodScanSetting(), + } + cfg := baseConfig() + cfg.MaxRetries = 1 + + code, output := runWithCapture(t, cfg, acsClient, coClient) + + // Exit code must be partial failure (IMP-ERR-003). + if code != run.ExitPartialError { + t.Errorf("IMP-ERR-004: expected exit code %d (partial), got %d", run.ExitPartialError, code) + } + // Console summary must show 1 failed. + if !strings.Contains(output, "failed: 1") { + t.Errorf("IMP-ERR-004: expected 'failed: 1' in output, got:\n%s", output) + } +} + +// TestIMP_ERR_004_MissingScanSettingRecordedAsProblem verifies that a missing +// ScanSetting is treated as a binding-level failure and recorded. +// Requirements: IMP-ERR-004, IMP-CLI-022. +func TestIMP_ERR_004_MissingScanSettingRecordedAsProblem(t *testing.T) { + acsClient := &mockACSClient{} + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{ + goodBinding("broken"), + goodBinding("ok"), + }, + scanSetting: goodScanSetting(), + } + + // Fail GetScanSetting on the third call (for "broken" binding's ScanSetting lookup). + // The first 2 calls are pre-existence snapshot probes (one per binding). + coClient2 := &selectiveCOClientByOrder{ + base: coClient, + failAtCall: 3, + failErr: errors.New("ScanSetting not found"), + } + + code, output := runWithCapture(t, baseConfig(), acsClient, coClient2) + + // Partial failure: one succeeded, one failed. + if code != run.ExitPartialError { + t.Errorf("IMP-ERR-004: expected exit code %d (partial), got %d", run.ExitPartialError, code) + } + if !strings.Contains(output, "failed: 1") { + t.Errorf("IMP-ERR-004: expected 'failed: 1' in output, got:\n%s", output) + } + if !strings.Contains(output, "created: 1") { + t.Errorf("IMP-ERR-004: expected 'created: 1' in output, got:\n%s", output) + } +} + +// selectiveCOClientByBinding wraps COClient to fail GetScanSetting for a +// specific binding name by inspecting which binding is being processed. +// Since GetScanSetting doesn't receive the binding name, we use a counter-based +// approach: the first call goes to the first binding, etc. +type selectiveCOClientByOrder struct { + base *mockCOClient + failAtCall int // 1-based; call index that should fail + callCount int + failErr error +} + +func (s *selectiveCOClientByOrder) ListScanSettingBindings(ctx context.Context) ([]cofetch.ScanSettingBinding, error) { + return s.base.ListScanSettingBindings(ctx) +} + +func (s *selectiveCOClientByOrder) GetScanSetting(ctx context.Context, namespace, name string) (*cofetch.ScanSetting, error) { + s.callCount++ + if s.callCount == s.failAtCall { + return nil, s.failErr + } + return s.base.GetScanSetting(ctx, namespace, name) +} + +func (s *selectiveCOClientByOrder) PatchSSBSettingsRef(_ context.Context, _, _, _ string) error { + return nil +} + +// --------------------------------------------------------------------------- +// Tests: dry-run mode (IMP-IDEM-004..007) +// --------------------------------------------------------------------------- + +// TestIMP_CLI_007_DryRunNoCreates verifies that no ACS create calls are made +// in dry-run mode. +// Requirement: IMP-CLI-007. +func TestIMP_CLI_007_DryRunNoCreates(t *testing.T) { + acsClient := &mockACSClient{} + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("cis-weekly")}, + scanSetting: goodScanSetting(), + } + cfg := baseConfig() + cfg.DryRun = true + + runWithCapture(t, cfg, acsClient, coClient) + + if acsClient.createCalls != 0 { + t.Errorf("IMP-CLI-007: expected 0 create calls in dry-run mode, got %d", acsClient.createCalls) + } +} + +// TestIMP_CLI_007_DryRunReportedAsCreate verifies that dry-run planned creates +// appear as "create" actions in the console summary. +// Requirement: IMP-CLI-007. +func TestIMP_CLI_007_DryRunReportedAsCreate(t *testing.T) { + acsClient := &mockACSClient{} + coClient := &mockCOClient{ + bindings: []cofetch.ScanSettingBinding{goodBinding("cis-weekly")}, + scanSetting: goodScanSetting(), + } + cfg := baseConfig() + cfg.DryRun = true + + _, output := runWithCapture(t, cfg, acsClient, coClient) + + if !strings.Contains(output, "created: 1") { + t.Errorf("IMP-CLI-007: expected 'created: 1' (planned) in dry-run output, got:\n%s", output) + } +} diff --git a/scripts/compliance-operator-importer/internal/status/status.go b/scripts/compliance-operator-importer/internal/status/status.go new file mode 100644 index 0000000000000..664e2d29c58e7 --- /dev/null +++ b/scripts/compliance-operator-importer/internal/status/status.go @@ -0,0 +1,74 @@ +// Package status provides compact, stage-by-stage progress output +// inspired by modern CLI tools and LLM chat interfaces. +package status + +import ( + "fmt" + "io" + "os" +) + +// Printer writes structured status messages to an output writer. +type Printer struct { + out io.Writer +} + +// New creates a Printer that writes to os.Stderr. +func New() *Printer { + return &Printer{out: os.Stderr} +} + +// NewWithWriter creates a Printer that writes to w. +func NewWithWriter(w io.Writer) *Printer { + return &Printer{out: w} +} + +// Stage prints a stage header: "▸ Stage: message". +func (p *Printer) Stage(stage, msg string) { + fmt.Fprintf(p.out, "▸ %s: %s\n", stage, msg) +} + +// Stagef prints a formatted stage header. +func (p *Printer) Stagef(stage, format string, args ...any) { + p.Stage(stage, fmt.Sprintf(format, args...)) +} + +// Detail prints an indented detail line under the current stage. +func (p *Printer) Detail(msg string) { + fmt.Fprintf(p.out, " %s\n", msg) +} + +// Detailf prints a formatted detail line. +func (p *Printer) Detailf(format string, args ...any) { + p.Detail(fmt.Sprintf(format, args...)) +} + +// OK prints a success result for the current stage. +func (p *Printer) OK(msg string) { + fmt.Fprintf(p.out, " ✓ %s\n", msg) +} + +// OKf prints a formatted success result. +func (p *Printer) OKf(format string, args ...any) { + p.OK(fmt.Sprintf(format, args...)) +} + +// Warn prints a warning result for the current stage. +func (p *Printer) Warn(msg string) { + fmt.Fprintf(p.out, " ! %s\n", msg) +} + +// Warnf prints a formatted warning. +func (p *Printer) Warnf(format string, args ...any) { + p.Warn(fmt.Sprintf(format, args...)) +} + +// Fail prints a failure result for the current stage. +func (p *Printer) Fail(msg string) { + fmt.Fprintf(p.out, " ✗ %s\n", msg) +} + +// Failf prints a formatted failure. +func (p *Printer) Failf(format string, args ...any) { + p.Fail(fmt.Sprintf(format, args...)) +} diff --git a/scripts/compliance-operator-importer/run.sh b/scripts/compliance-operator-importer/run.sh new file mode 100755 index 0000000000000..46e322ed5b9cc --- /dev/null +++ b/scripts/compliance-operator-importer/run.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# run.sh — Run the compliance-operator-importer via container. +# +# Automatically mounts kubeconfig files and forwards ACS auth env vars +# so you don't have to spell out docker/podman flags manually. +# +# USAGE: +# ./run.sh --endpoint central.example.com --dry-run +# ./run.sh --endpoint central.example.com --context my-cluster +# +# ENVIRONMENT (read from host, forwarded to container): +# KUBECONFIG Colon-separated kubeconfig paths (default: ~/.kube/config) +# ROX_ENDPOINT ACS Central URL (alternative to --endpoint) +# ROX_API_TOKEN API token auth +# ROX_ADMIN_PASSWORD Basic auth password +# ROX_ADMIN_USER Basic auth username (default: admin) +# +# IMAGE override: +# IMAGE=my-registry/co-importer:v1 ./run.sh --endpoint ... + +set -euo pipefail + +IMAGE="${IMAGE:-localhost/compliance-operator-importer:latest}" +CONTAINER_RT="${CONTAINER_RT:-$(command -v podman 2>/dev/null || echo docker)}" + +# ── Kubeconfig mounts ──────────────────────────────────────────────────────── + +kubeconfig_paths="${KUBECONFIG:-$HOME/.kube/config}" + +mount_args=() +container_paths=() +i=0 + +IFS=':' read -ra kc_files <<< "$kubeconfig_paths" +for f in "${kc_files[@]}"; do + f="${f/#\~/$HOME}" + if [[ ! -f "$f" ]]; then + echo "WARNING: kubeconfig not found, skipping: $f" >&2 + continue + fi + target="/kubeconfig/config-${i}" + mount_args+=(-v "$f:$target:ro") + container_paths+=("$target") + ((++i)) +done + +if [[ ${#container_paths[@]} -eq 0 ]]; then + echo "ERROR: no kubeconfig files found" >&2 + exit 1 +fi + +# Join container paths with ':' for the in-container KUBECONFIG. +joined=$(IFS=':'; echo "${container_paths[*]}") + +# ── Auth env vars ──────────────────────────────────────────────────────────── + +env_args=(-e "KUBECONFIG=$joined") + +for var in ROX_ENDPOINT ROX_API_TOKEN ROX_ADMIN_PASSWORD ROX_ADMIN_USER; do + if [[ -n "${!var:-}" ]]; then + env_args+=(-e "$var=${!var}") + fi +done + +# ── Run ────────────────────────────────────────────────────────────────────── + +exec "$CONTAINER_RT" run --rm \ + "${mount_args[@]}" \ + "${env_args[@]}" \ + "$IMAGE" \ + "$@" diff --git a/scripts/compliance-operator-importer/specs/00-spec-process.md b/scripts/compliance-operator-importer/specs/00-spec-process.md new file mode 100644 index 0000000000000..beb6beaf159ef --- /dev/null +++ b/scripts/compliance-operator-importer/specs/00-spec-process.md @@ -0,0 +1,57 @@ +# 00 - Spec Process and Quality Gates + +## Purpose + +Translate product intent into executable behavior and contract specs before writing implementation code. + +## Community best-practice principles applied + +- **Behavior over implementation:** specs describe externally observable outcomes, not internal algorithms. +- **Single source of truth:** these specs replace ad-hoc task notes; code and tests must trace back to them. +- **Executable examples:** each important rule is captured as concrete scenario(s), preferably data-driven. +- **Contract-first boundaries:** external interfaces (CLI, ACS API payload shape, report output) are specified explicitly. +- **Low brittleness assertions:** tests assert fields that matter to consumers, avoid incidental details. + +## Requirement key words + +- `MUST`: mandatory behavior. +- `SHOULD`: strongly recommended unless justified deviation. +- `MAY`: optional. + +## Traceability model + +Every requirement gets an ID: +- `IMP-CLI-*` for CLI/config contract +- `IMP-MAP-*` for CO -> ACS mapping +- `IMP-IDEM-*` for idempotency/conflicts +- `IMP-ERR-*` for errors/retries/reporting +- `IMP-ACC-*` for acceptance/runtime checks + +Implementation and tests MUST annotate requirement IDs in comments or test names. + +## Spec execution strategy + +### Unit-level specs +- Parsing/validation (flags, env, config file). +- Mapping translation (CO objects -> ACS payload). +- Diff/idempotency logic. +- Retry classification. + +### Integration-level specs +- Kubernetes read path for CO resources. +- ACS API client interactions (`GET/POST/PUT`). +- Dry-run no-write guarantees. + +### Acceptance-level specs +- End-to-end execution against real cluster and ACS endpoint. +- Idempotency second-run no-op behavior. + +## Quality gates + +Before merging implementation: + +1. `MUST` requirements implemented. +2. All mapped scenarios have tests. +3. Dry-run validated as side-effect free. +4. Real-cluster acceptance checks pass. +5. No product runtime code path changes in Sensor/Central. diff --git a/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md b/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md new file mode 100644 index 0000000000000..d23626d9b83e8 --- /dev/null +++ b/scripts/compliance-operator-importer/specs/01-cli-and-config-contract.md @@ -0,0 +1,147 @@ +# 01 - CLI and Config Contract + +## Goal + +Define the importer interface so it can be implemented and tested predictably. + +## Inputs contract + +### Required inputs + +- **IMP-CLI-001**: importer MUST accept Central endpoint (`--endpoint` or `ROX_ENDPOINT`). + - if the value does not contain a scheme, importer MUST prepend `https://`. + - if the value starts with `http://`, importer MUST error. +- **IMP-CLI-002**: importer MUST support auth modes, auto-inferred from available credentials + (no explicit `--auth-mode` flag, no env-var-name indirection): + - token mode: when `ROX_API_TOKEN` is set, + - basic mode: when `ROX_ADMIN_PASSWORD` is set, + - if both are set: error ("ambiguous auth"), + - if neither is set: error with help text listing both options. +- **IMP-CLI-003**: importer MUST load each kubeconfig file independently (no merging): + - file discovery follows standard kubectl rules: `KUBECONFIG` env var (colon-separated + paths) or `~/.kube/config`. + - each file in the `KUBECONFIG` path is loaded in isolation. Contexts, users, and + clusters defined in one file never interact with entries in another file. This + prevents credential collisions when multiple files define the same user name + (e.g. `admin`) with different certificates. + - by default, the importer iterates **all contexts** across all files, treating + each context as a separate source cluster. + - when the same context name appears in multiple files, both are processed + independently with their own credentials. + - `--context ` (repeatable, optional): filters which contexts to use. Matches + against context names across all files. When given, only matching contexts are + processed; all others are skipped. + - for each context, the ACS cluster ID is auto-discovered (see IMP-MAP-016..018). +- **IMP-CLI-004**: importer MUST support namespace scope: + - `--co-namespace ` (default `openshift-compliance`) for single namespace, or + - `--co-all-namespaces` for cluster-wide scan. + +### Optional inputs + +- **IMP-CLI-006**: importer default mode is create-only; `--overwrite-existing` enables update mode. +- **IMP-CLI-007**: `--dry-run` MUST disable all ACS write operations. +- **IMP-CLI-008**: `--report-json ` for structured report output. +- **IMP-CLI-009**: `--request-timeout ` default `30s`. +- **IMP-CLI-010**: `--max-retries ` default `5`, min `0`. +- **IMP-CLI-011**: `--ca-cert-file ` optional. +- **IMP-CLI-012**: `--insecure-skip-verify` default false; MUST require explicit flag. +- **IMP-CLI-024**: for basic mode: + - username is read from `--username` flag or `ROX_ADMIN_USER` env var (default `admin`). + - password is read from `ROX_ADMIN_PASSWORD` env var (no flag). +- **IMP-CLI-025**: importer MUST reject ambiguous auth config: + - both `ROX_API_TOKEN` and `ROX_ADMIN_PASSWORD` are set → error, + - neither is set → error with help text. +- **IMP-CLI-027**: `--overwrite-existing` (default `false`): + - when `false`: existing ACS scan configs with matching `scanName` are skipped (create-only). + - when `true`: existing ACS scan configs with matching `scanName` are updated via + `PUT /v2/compliance/scan/configurations/{id}`. + +## Preflight checks + +- **IMP-CLI-013**: `--endpoint` MUST use HTTPS: + - bare hostname/port (no scheme) → `https://` is prepended automatically, + - `https://...` → accepted as-is, + - `http://...` → error. +- **IMP-CLI-014**: auth material for inferred mode MUST be non-empty: + - token mode: `ROX_API_TOKEN` is non-empty, + - basic mode: `ROX_ADMIN_PASSWORD` is non-empty (username defaults to `admin`). +- **IMP-CLI-015**: importer MUST probe ACS auth with: + - `GET /v2/compliance/scan/configurations?pagination.limit=1` + - using selected auth mode, + - success only on HTTP 200. +- **IMP-CLI-016**: HTTP 401/403 at preflight MUST fail-fast with remediation message. +- **IMP-CLI-016a**: TLS certificate verification failures at preflight MUST hint at + `--ca-cert-file` and `--insecure-skip-verify` (not generic network connectivity), + since the most common cause is a self-signed or internal CA certificate. + +## Output contract + +### Exit codes + +- **IMP-CLI-017**: `0` when run completed with no failed bindings. +- **IMP-CLI-018**: `1` for fatal preflight/config errors (no import attempted). +- **IMP-CLI-019**: `2` for partial success (some bindings failed). + +### Console summary + +- **IMP-CLI-020**: print totals: + - bindings discovered + - creates/skips/failures + - dry-run indicator + +### JSON report shape + +- **IMP-CLI-021**: when `--report-json` is set, write valid JSON with: + - `meta` (timestamp, dryRun, namespaceScope, mode=`create-only` | `create-or-update`) + - `counts` (discovered, create, skip, failed) + - `items[]`: + - `source` (`namespace`, `bindingName`, `scanSettingName`) + - `action` (`create|skip|fail`) + - `reason` + - `attempts` + - `acsScanConfigId` (if known) + - `error` (if failed) + - `problems[]`: + - `severity` (`error|warning`) + - `category` (`input|mapping|conflict|auth|api|retry|validation`) + - `resourceRef` (`namespace/name` or synthetic ref for non-resource errors) + - `description` (what happened) + - `fixHint` (how to fix) + - `skipped` (boolean; true when resource was skipped) + +- **IMP-CLI-022**: whenever any problem occurs for a resource, importer MUST: + - skip that resource, + - append one `problems[]` entry with `description` and `fixHint`, + - continue processing other resources. + +## Existing ACS config behavior + +- **IMP-IDEM-001**: when `--overwrite-existing` is `false` (default) and `scanName` already exists + in ACS, importer MUST skip that source resource. +- **IMP-IDEM-002**: skipped-existing resources MUST be added to `problems[]` with category `conflict` + and a fix hint. +- **IMP-IDEM-003**: when `--overwrite-existing` is `false`, importer MUST NOT send `PUT` updates. +- **IMP-IDEM-008**: when `--overwrite-existing` is `true` and `scanName` already exists in ACS, + importer MUST update it via `PUT /v2/compliance/scan/configurations/{id}`. +- **IMP-IDEM-009**: when `--overwrite-existing` is `true` and `scanName` does not exist, + importer MUST create it via `POST` (same as IMP-IDEM-001 create path). + +Example minimal report skeleton: + +```json +{ + "meta": { + "dryRun": true, + "namespaceScope": "openshift-compliance", + "mode": "create-only" + }, + "counts": { + "discovered": 2, + "create": 1, + "skip": 1, + "failed": 0 + }, + "items": [], + "problems": [] +} +``` diff --git a/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature b/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature new file mode 100644 index 0000000000000..5f5b4fef3d574 --- /dev/null +++ b/scripts/compliance-operator-importer/specs/02-co-to-acs-mapping.feature @@ -0,0 +1,202 @@ +Feature: Map Compliance Operator scheduled scan resources to ACS scan configurations + As an operator + I want importer behavior defined by examples + So implementation can be verified against stable expected outcomes + + Background: + Given ACS endpoint and token preflight succeeded + And the importer can read compliance.openshift.io resources + + @mapping @name + Scenario: Use ScanSettingBinding name as scanName + Given a ScanSettingBinding "cis-weekly" in namespace "openshift-compliance" + And the binding references ScanSetting "default-auto-apply" + And the binding references profiles: + | name | kind | + | ocp4-cis-node | Profile | + | ocp4-cis-master | Profile | + | my-tailored-profile | TailoredProfile | + When the importer builds the ACS payload + Then payload.scanName MUST equal "cis-weekly" # IMP-MAP-001 + And payload.scanConfig.profiles MUST equal: + | value | + | my-tailored-profile | + | ocp4-cis-master | + | ocp4-cis-node | # sorted, deduped + + @mapping @profiles + Scenario: Default missing profile kind to Profile + Given a ScanSettingBinding profile reference "custom-x" with no kind + When the importer resolves profile references + Then the profile reference kind MUST be treated as "Profile" # IMP-MAP-002 + And the resulting ACS profile name list MUST include "custom-x" + + @mapping @schedule + Scenario: Convert ScanSetting schedule into ACS schedule + Given ScanSetting "daily-scan" has schedule "0 0 * * *" + And ScanSettingBinding "daily-cis" references "daily-scan" + When the importer maps schedule fields + Then payload.scanConfig.oneTimeScan MUST be false # IMP-MAP-003 + And payload.scanConfig.scanSchedule MUST be present # IMP-MAP-004 + + @mapping @schedule @wire-format + Scenario Outline: Schedule JSON wire format matches ACS API proto + Given ScanSetting "sched" has schedule "" + And ScanSettingBinding "binding" references "sched" + When the importer builds the ACS payload and serializes it to JSON + Then the JSON scanSchedule object MUST contain only fields defined in proto/api/v2/common.proto Schedule: + intervalType, hour, minute, daysOfWeek, daysOfMonth # IMP-MAP-004a + And the JSON scanSchedule.intervalType MUST be "" + And for WEEKLY: scanSchedule.daysOfWeek.days MUST be present # IMP-MAP-004b + And for MONTHLY: scanSchedule.daysOfMonth.days MUST be present # IMP-MAP-004c + And the full payload JSON field names MUST match ComplianceScanConfiguration proto: + id, scanName, scanConfig, clusters # IMP-MAP-004d + + Examples: + | cron | intervalType | + | 0 2 * * * | DAILY | + | 0 2 * * 0 | WEEKLY | + | 0 2 1 * * | MONTHLY | + + @mapping @description + Scenario: Build helpful description without ownership marker + Given ScanSettingBinding "cis-weekly" in namespace "openshift-compliance" + When the importer builds payload description + Then payload.scanConfig.description MUST contain "Imported from CO ScanSettingBinding openshift-compliance/cis-weekly" # IMP-MAP-005 + And payload.scanConfig.description SHOULD include settings reference context # IMP-MAP-006 + + @mapping @clusters + Scenario: Auto-discover ACS cluster ID from admission-control ConfigMap + Given kubecontext "ctx-a" points to a secured cluster + And ConfigMap "admission-control" in namespace "stackrox" has data key "cluster-id" = "uuid-a" + When the importer resolves the ACS cluster ID for "ctx-a" + Then the resolved ACS cluster ID MUST be "uuid-a" # IMP-MAP-016 + + @mapping @clusters + Scenario: Fallback to OpenShift ClusterVersion for cluster matching + Given kubecontext "ctx-b" points to an OpenShift cluster + And ConfigMap "admission-control" is not readable + And ClusterVersion "version" has spec.clusterID "ocp-uuid-b" + And ACS cluster list contains a cluster with providerMetadata.cluster.id "ocp-uuid-b" and ACS ID "acs-uuid-b" + When the importer resolves the ACS cluster ID for "ctx-b" + Then the resolved ACS cluster ID MUST be "acs-uuid-b" # IMP-MAP-017 + + @mapping @clusters + Scenario: Fallback to helm-effective-cluster-name for cluster matching + Given kubecontext "ctx-c" points to a cluster + And ConfigMap "admission-control" is not readable + And ClusterVersion is not available + And Secret "helm-effective-cluster-name" has data key "cluster-name" = "my-cluster" + And ACS cluster list contains a cluster named "my-cluster" with ACS ID "acs-uuid-c" + When the importer resolves the ACS cluster ID for "ctx-c" + Then the resolved ACS cluster ID MUST be "acs-uuid-c" # IMP-MAP-018 + + @mapping @clusters + Scenario: All discovery methods fail with detailed per-method errors + Given kubecontext "ctx-d" points to a cluster + And ConfigMap "admission-control" is not readable (returns "Unauthorized") + And ClusterVersion is not available (returns "Unauthorized") + And Secret "helm-effective-cluster-name" is not readable (returns "Unauthorized") + When the importer resolves the ACS cluster ID for "ctx-d" + Then the error MUST list each method's failure reason # IMP-MAP-016a + + @mapping @clusters @multicluster + Scenario: Merge SSBs with same name across clusters + Given kubecontext "ctx-a" has ScanSettingBinding "cis-weekly" with profiles ["ocp4-cis"] and schedule "0 2 * * 0" + And kubecontext "ctx-b" has ScanSettingBinding "cis-weekly" with profiles ["ocp4-cis"] and schedule "0 2 * * 0" + And ctx-a resolves to ACS cluster ID "uuid-a" + And ctx-b resolves to ACS cluster ID "uuid-b" + When the importer merges SSBs across clusters + Then one ACS scan config MUST be created with scanName "cis-weekly" # IMP-MAP-019 + And payload.clusters MUST equal: + | value | + | uuid-a | + | uuid-b | # IMP-MAP-021 + + @mapping @clusters @multicluster @error + Scenario: Error when same-name SSBs have mismatched profiles + Given kubecontext "ctx-a" has ScanSettingBinding "cis-weekly" with profiles ["ocp4-cis"] + And kubecontext "ctx-b" has ScanSettingBinding "cis-weekly" with profiles ["ocp4-cis", "ocp4-moderate"] + When the importer merges SSBs across clusters + Then "cis-weekly" MUST be marked failed # IMP-MAP-020 + And problems list MUST include category "conflict" + And problem description MUST mention mismatch across clusters + And the console MUST print a warning with the conflict reason # IMP-MAP-020a + + @mapping @clusters @multicluster @error + Scenario: Error when same-name SSBs have mismatched schedules + Given kubecontext "ctx-a" has ScanSettingBinding "cis-weekly" with schedule "0 2 * * 0" + And kubecontext "ctx-b" has ScanSettingBinding "cis-weekly" with schedule "0 3 * * 1" + When the importer merges SSBs across clusters + Then "cis-weekly" MUST be marked failed # IMP-MAP-020 + And problems list MUST include category "conflict" + And problem description MUST mention mismatch across clusters + And the console MUST print a warning with the conflict reason # IMP-MAP-020a + + @validation @mapping + Scenario: Missing ScanSetting reference fails only that binding + Given ScanSettingBinding "broken-binding" references ScanSetting "does-not-exist" + When the importer processes all discovered bindings + Then "broken-binding" MUST be marked failed # IMP-MAP-008 + And problems list MUST include an entry for "broken-binding" # IMP-MAP-009 + And that problem entry MUST include a fix hint # IMP-MAP-010 + And other valid bindings MUST still be processed # IMP-MAP-011 + + @mapping @adopt + Scenario: Adopt SSB after successful ACS scan config creation + Given ScanSettingBinding "cis-weekly" in namespace "openshift-compliance" + And the SSB references ScanSetting "my-old-setting" + And the importer successfully creates ACS scan config "cis-weekly" + And ACS creates ScanSetting "cis-weekly" on the cluster + When the importer runs the adoption step + Then SSB "cis-weekly" settingsRef.name MUST be patched to "cis-weekly" # IMP-ADOPT-001 + And the importer MUST log an info message about the adoption # IMP-ADOPT-002 + + @mapping @adopt + Scenario: Skip adoption when SSB already references the correct ScanSetting + Given ScanSettingBinding "cis-weekly" in namespace "openshift-compliance" + And the SSB references ScanSetting "cis-weekly" + And the importer successfully creates ACS scan config "cis-weekly" + When the importer runs the adoption step + Then SSB "cis-weekly" settingsRef.name MUST NOT be modified # IMP-ADOPT-003 + + @mapping @adopt @timeout + Scenario: Adoption warns on timeout waiting for ScanSetting + Given ScanSettingBinding "cis-weekly" in namespace "openshift-compliance" + And the SSB references ScanSetting "my-old-setting" + And the importer successfully creates ACS scan config "cis-weekly" + And ACS has NOT yet created ScanSetting "cis-weekly" on the cluster + When the adoption poll times out + Then the importer MUST log a warning # IMP-ADOPT-004 + And the SSB MUST NOT be modified # IMP-ADOPT-005 + And the importer MUST NOT exit with an error # IMP-ADOPT-006 + + @mapping @adopt @multicluster + Scenario: Adoption patches SSBs independently per cluster + Given kubecontext "ctx-a" has SSB "cis-weekly" referencing ScanSetting "setting-a" + And kubecontext "ctx-b" has SSB "cis-weekly" referencing ScanSetting "setting-b" + And the importer creates one ACS scan config "cis-weekly" for both clusters + And ACS creates ScanSetting "cis-weekly" on both clusters + When the importer runs the adoption step + Then SSB "cis-weekly" on ctx-a MUST be patched to reference "cis-weekly" # IMP-ADOPT-007 + And SSB "cis-weekly" on ctx-b MUST be patched to reference "cis-weekly" # IMP-ADOPT-007 + + @mapping @adopt @multicluster @partial + Scenario: Partial adoption succeeds when one cluster times out + Given kubecontext "ctx-a" has SSB "cis-weekly" referencing ScanSetting "setting-a" + And kubecontext "ctx-b" has SSB "cis-weekly" referencing ScanSetting "setting-b" + And ACS creates ScanSetting "cis-weekly" on ctx-a but NOT on ctx-b + When the importer runs the adoption step + Then SSB "cis-weekly" on ctx-a MUST be patched # IMP-ADOPT-008 + And the importer MUST warn about ctx-b timeout # IMP-ADOPT-008 + And the importer MUST NOT exit with an error # IMP-ADOPT-006 + + @mapping @schedule @problems + Scenario: Invalid schedule is collected as problem and skipped + Given ScanSetting "bad-schedule" has schedule "every day at noon" + And ScanSettingBinding "broken-schedule-binding" references "bad-schedule" + When the importer maps schedule fields + Then "broken-schedule-binding" MUST be skipped # IMP-MAP-012 + And problems list MUST include category "mapping" # IMP-MAP-013 + And problem description MUST mention schedule conversion failed # IMP-MAP-014 + And problem fix hint MUST suggest using a valid cron expression # IMP-MAP-015 diff --git a/scripts/compliance-operator-importer/specs/03-idempotency-dry-run-retries.feature b/scripts/compliance-operator-importer/specs/03-idempotency-dry-run-retries.feature new file mode 100644 index 0000000000000..55b2e69f56230 --- /dev/null +++ b/scripts/compliance-operator-importer/specs/03-idempotency-dry-run-retries.feature @@ -0,0 +1,91 @@ +Feature: Create-only idempotency dry-run behavior and retry policy + As an operator + I want safe reruns and predictable failure handling + So importer usage is low risk in production environments + + Background: + Given ACS endpoint and token preflight succeeded + And desired payload for source "openshift-compliance/cis-weekly" is computed + + @idempotency + Scenario: Create when scanName does not exist + Given ACS has no scan configuration with scanName "cis-weekly" + When importer executes in apply mode + Then importer MUST send POST /v2/compliance/scan/configurations # IMP-IDEM-001 + And action MUST be "create" + + @idempotency + Scenario: Skip when scanName already exists (default mode) + Given ACS has scan configuration with scanName "cis-weekly" + And --overwrite-existing is false + When importer executes in apply mode + Then importer MUST NOT send PUT # IMP-IDEM-003 + And action MUST be "skip" + And reason MUST include "already exists" + And problems list MUST include conflict category # IMP-IDEM-002 + + @idempotency @overwrite + Scenario: Update when scanName already exists and --overwrite-existing is true + Given ACS has scan configuration with scanName "cis-weekly" and id "existing-id" + And --overwrite-existing is true + When importer executes in apply mode + Then importer MUST send PUT /v2/compliance/scan/configurations/existing-id # IMP-IDEM-008 + And action MUST be "update" + + @idempotency @overwrite + Scenario: Create when scanName does not exist and --overwrite-existing is true + Given ACS has no scan configuration with scanName "new-scan" + And --overwrite-existing is true + When importer executes in apply mode + Then importer MUST send POST /v2/compliance/scan/configurations # IMP-IDEM-009 + And action MUST be "create" + + @dryrun + Scenario: Dry-run performs no writes + Given importer is started with --dry-run + And at least one action would be create in apply mode + When importer completes + Then importer MUST NOT send POST # IMP-IDEM-004 + And importer MUST NOT send PUT # IMP-IDEM-005 + And planned actions MUST be included in report # IMP-IDEM-006 + And problems list MUST still be populated for problematic resources # IMP-IDEM-007 + + @retry @transient + Scenario Outline: Retry transient ACS write failures + Given an ACS create operation returns HTTP for first 2 attempts + And the 3rd attempt succeeds + When importer executes in apply mode + Then operation MUST be retried with backoff # IMP-ERR-001 + And total attempts MUST be 3 + + Examples: + | status | + | 429 | + | 502 | + | 503 | + | 504 | + + @retry @nontransient + Scenario Outline: Do not retry non-transient errors + Given an ACS create operation returns HTTP + When importer executes in apply mode + Then operation MUST NOT be retried # IMP-ERR-002 + And the item MUST be skipped and recorded as a problem # IMP-ERR-004 + + Examples: + | status | + | 400 | + | 401 | + | 403 | + | 404 | + + @exitcodes + Scenario Outline: Exit code reflects outcome category + Given importer run ends with outcome "" + Then process exit code MUST be # IMP-ERR-003 + + Examples: + | outcome | code | + | all successful | 0 | + | fatal preflight failure | 1 | + | partial binding failures | 2 | diff --git a/scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md b/scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md new file mode 100644 index 0000000000000..1a65a92e07cc1 --- /dev/null +++ b/scripts/compliance-operator-importer/specs/04-validation-and-acceptance.md @@ -0,0 +1,171 @@ +# 04 - Validation and Acceptance Spec + +This document is the acceptance test contract for real-cluster validation. + +## Preconditions + +- `kubectl`, `curl`, `jq` installed. +- Logged into target cluster containing Compliance Operator resources. +- Central endpoint reachable from runner. +- Importer binary built locally. + +Set environment: + +```bash +export ROX_ENDPOINT="https://central.stackrox.example.com:443" +export ROX_API_TOKEN="" +export ROX_ADMIN_USER="admin" +export ROX_ADMIN_PASSWORD="" +export CO_NAMESPACE="openshift-compliance" +export IMPORTER_BIN="./bin/co-acs-scan-importer" +# For multi-cluster: merge kubeconfigs +export KUBECONFIG="~/.kube/config:~/.kube/config-secured-cluster" +``` + +## Acceptance checks + +### A1 - CO resource discovery + +- **IMP-ACC-001**: importer test run MUST begin only if required CO resource types are listable. + +Commands: + +```bash +kubectl get scansettingbindings.compliance.openshift.io -n "${CO_NAMESPACE}" +kubectl get scansettings.compliance.openshift.io -n "${CO_NAMESPACE}" +kubectl get profiles.compliance.openshift.io -n "${CO_NAMESPACE}" +kubectl get tailoredprofiles.compliance.openshift.io -n "${CO_NAMESPACE}" || true +``` + +Pass condition: + +- first 3 commands succeed (exit 0). + +### A2 - ACS auth preflight + +- **IMP-ACC-002**: token and endpoint MUST pass read probe. +- **IMP-ACC-013**: optional basic-auth mode MUST pass read probe in local/dev environments. + +Command: + +```bash +curl -ksS \ + -H "Authorization: Bearer ${ROX_API_TOKEN}" \ + "${ROX_ENDPOINT}/v2/compliance/scan/configurations?pagination.limit=1" | jq . +``` + +Pass condition: + +- command returns valid JSON and does not contain auth error. + +Optional local/dev basic-auth probe: + +```bash +curl -ksS \ + -u "${ROX_ADMIN_USER}:${ROX_ADMIN_PASSWORD}" \ + "${ROX_ENDPOINT}/v2/compliance/scan/configurations?pagination.limit=1" | jq . +``` + +### A3 - Dry-run side-effect safety + +- **IMP-ACC-003**: dry-run MUST produce no writes. + +Command (auto-discovery mode): + +```bash +"${IMPORTER_BIN}" \ + --endpoint "${ROX_ENDPOINT}" \ + --dry-run \ + --report-json "/tmp/co-acs-import-dryrun.json" +``` + +Pass conditions: + +- exit code is `0` or `2`, +- `/tmp/co-acs-import-dryrun.json` exists and is valid JSON, +- actions listed as planned only (no applied create/update markers), +- `problems[]` is present and contains `description` + `fixHint` for each problematic resource. + +### A4 - Apply creates expected configs + +- **IMP-ACC-004**: apply mode MUST create missing target ACS configs. + +Command (auto-discovery mode): + +```bash +"${IMPORTER_BIN}" \ + --endpoint "${ROX_ENDPOINT}" \ + --report-json "/tmp/co-acs-import-apply.json" +``` + +Verify: + +```bash +curl -ksS \ + -H "Authorization: Bearer ${ROX_API_TOKEN}" \ + "${ROX_ENDPOINT}/v2/compliance/scan/configurations?pagination.limit=200" | \ + jq '.configurations[] | {id, scanName, profiles: .scanConfig.profiles, description: .scanConfig.description}' +``` + +Pass conditions: + +- expected imported scan names exist, +- profile lists match expected binding mappings. + +### A5 - Idempotency on second run + +- **IMP-ACC-005**: second run with same inputs MUST be no-op. + +Command: + +```bash +"${IMPORTER_BIN}" \ + --endpoint "${ROX_ENDPOINT}" \ + --report-json "/tmp/co-acs-import-second-run.json" +``` + +Pass conditions: + +- report shows skip actions for already-existing scan names, +- no net changes in ACS list output. + +### A6 - Existing config behavior + +- **IMP-ACC-006**: without `--overwrite-existing`, existing scan names MUST be skipped + and recorded in `problems[]`. +- **IMP-ACC-014**: with `--overwrite-existing`, existing scan names MUST be updated via PUT. + +Procedure (create-only): + +1. Manually modify one imported ACS scan config (name unchanged). +2. Re-run importer without `--overwrite-existing`. +3. Verify that modified existing config is not updated and is captured as skipped conflict. + +Procedure (overwrite): + +1. Re-run importer with `--overwrite-existing`. +2. Verify that the modified config is updated back to the imported state. + +### A8 - Multi-cluster merge + +- **IMP-ACC-015**: when the same SSB name exists on multiple source clusters with matching + profiles and schedule, importer MUST create one scan config targeting all resolved cluster IDs. +- **IMP-ACC-016**: when the same SSB name exists on multiple source clusters with different + profiles or schedule, importer MUST error for that SSB name. + +### A9 - Auto-discovery + +- **IMP-ACC-017**: importer MUST auto-discover the ACS cluster ID from the admission-control + ConfigMap's `cluster-id` key. + +### A7 - Failure paths + +- **IMP-ACC-007**: invalid token MUST fail-fast with exit code `1`. +- **IMP-ACC-008**: missing referenced ScanSetting MUST fail only that binding (partial run exit code `2` when others succeed). +- **IMP-ACC-009**: transient ACS failures MUST follow retry policy and record attempt counts. +- **IMP-ACC-012**: all per-resource problems MUST be emitted in `problems[]` with remediation hint. + +## Non-goal compliance checks + +- **IMP-ACC-010**: no code changes in Sensor/Central runtime paths are required to run importer. +- **IMP-ACC-011**: importer MUST not mutate Compliance Operator resources. diff --git a/scripts/compliance-operator-importer/specs/05-traceability-matrix.md b/scripts/compliance-operator-importer/specs/05-traceability-matrix.md new file mode 100644 index 0000000000000..9e24a1ca5da18 --- /dev/null +++ b/scripts/compliance-operator-importer/specs/05-traceability-matrix.md @@ -0,0 +1,19 @@ +# 05 - Traceability Matrix + +Use this matrix to ensure complete implementation coverage. + +|Requirement ID|Spec source|Test level|Notes| +|---|---|---|---| +|IMP-CLI-001..027|`01-cli-and-config-contract.md`|Unit + integration|CLI parsing, preflight, auth modes, multi-cluster, --overwrite-existing| +|IMP-MAP-001..021, IMP-MAP-020a|`02-co-to-acs-mapping.feature`|Unit + integration|Mapping, schedule, cluster auto-discovery, SSB merging, merge conflict console output| +|IMP-IDEM-001..009|`03-idempotency-dry-run-retries.feature`|Unit + integration|Idempotency, overwrite mode (PUT), dry-run reporting| +|IMP-ERR-001..004|`03-idempotency-dry-run-retries.feature`|Unit + integration|Retry classes, skip-on-error behavior, exit code outcomes| +|IMP-ACC-001..017|`04-validation-and-acceptance.md`|Acceptance|Real cluster, ACS verification, multi-cluster merge, auto-discovery| +|IMP-IMG-001..006|`07-container-image.md`|Build + smoke|Dockerfile, static binary, multi-arch manifest, image size| + +## Coverage rule + +For each requirement ID, implementation PR MUST include: + +- at least one test case name containing that ID, and +- one short note in PR description summarizing pass evidence for that ID family. diff --git a/scripts/compliance-operator-importer/specs/06-implementation-backlog.md b/scripts/compliance-operator-importer/specs/06-implementation-backlog.md new file mode 100644 index 0000000000000..2272d0f6f9ebb --- /dev/null +++ b/scripts/compliance-operator-importer/specs/06-implementation-backlog.md @@ -0,0 +1,324 @@ +# 06 - Implementation Backlog (Spec + Agentic Execution) + +This backlog translates specs into delivery slices with strict requirement traceability. + +## Working rules + +- Implement slices in order. +- Implement production code in Go for Phase 1 (no bash/shell implementation). +- For each slice: + 1. write/enable failing tests first, + 2. implement minimum code to pass, + 3. run tests and capture evidence, + 4. list fulfilled requirement IDs in PR notes. +- Keep each slice in its own PR when possible. + +## Slice A - CLI, config, and preflight + +### A Goal + +Provide a reliable entrypoint with strict validation and preflight checks. + +### A Requirement IDs + +- `IMP-CLI-001..016` +- `IMP-CLI-024..025` + +### A Implementation targets (suggested) + +- `scripts/compliance-operator-importer/cmd/importer/main.go` +- `scripts/compliance-operator-importer/internal/config/config.go` +- `scripts/compliance-operator-importer/internal/preflight/preflight.go` + +### A Tests to add + +- `internal/config/config_test.go` +- `internal/preflight/preflight_test.go` + +### A Acceptance signal + +- Valid flags/env parse and preflight probe behavior with correct exit pathing. +- Both auth modes behave correctly: + - token mode default path, + - basic mode local/dev path. + +### A Agent prompt seed + +- "Implement Slice A for create-only importer. Start with tests for IMP-CLI-001..016 and IMP-CLI-024..025, then implement CLI/config/preflight with HTTPS and both token/basic auth mode support." + +## Slice B - CO discovery and mapping core + +### B Goal + +Discover CO resources and map into ACS create payloads. + +### B Requirement IDs + +- `IMP-MAP-001..015` + +### B Implementation targets (suggested) + +- `scripts/compliance-operator-importer/internal/cofetch/client.go` +- `scripts/compliance-operator-importer/internal/mapping/mapping.go` +- `scripts/compliance-operator-importer/internal/mapping/schedule.go` + +### B Tests to add + +- `internal/mapping/mapping_test.go` +- `internal/mapping/schedule_test.go` + +### B Acceptance signal + +- Deterministic payload creation from SSB/ScanSetting/Profile inputs. +- Invalid schedule path produces skip-worthy error with fix hint text. + +### B Agent prompt seed + +- "Implement Slice B with tests first for IMP-MAP-001..015. Ensure missing profile kind defaults to Profile and invalid schedule becomes skip+problem." + +## Slice C - ACS create-only writer and idempotency + +### C Goal + +Create missing configs, skip existing names, never update. + +### C Requirement IDs + +- `IMP-IDEM-001..007` + +### C Implementation targets (suggested) + +- `scripts/compliance-operator-importer/internal/acs/client.go` +- `scripts/compliance-operator-importer/internal/reconcile/create_only.go` + +### C Tests to add + +- `internal/reconcile/create_only_test.go` +- `internal/acs/client_test.go` + +### C Acceptance signal + +- Existing `scanName` always skipped with conflict problem. +- No code path emits `PUT`. + +### C Agent prompt seed + +- "Implement Slice C as strict create-only. Test IMP-IDEM-001..007 first, especially: existing scanName => skip + conflict problem; never call PUT." + +## Slice D - Problem list, reporting, and exit codes + +### D Goal + +Centralize error handling/reporting and enforce run outcomes. + +### D Requirement IDs + +- `IMP-CLI-017..022` +- `IMP-ERR-001..004` + +### D Implementation targets (suggested) + +- `scripts/compliance-operator-importer/internal/problems/problems.go` +- `scripts/compliance-operator-importer/internal/report/report.go` +- `scripts/compliance-operator-importer/internal/run/run.go` + +### D Tests to add + +- `internal/problems/problems_test.go` +- `internal/report/report_test.go` +- `internal/run/run_test.go` + +### D Acceptance signal + +- `problems[]` always emitted for problematic resources with `description` + `fixHint`. +- exit codes map correctly to all-success/fatal/partial outcomes. + +### D Agent prompt seed + +- "Implement Slice D with tests first for IMP-CLI-017..022 and IMP-ERR-001..004. Ensure problem list and exit code semantics exactly match spec." + +## Slice E - Multi-cluster support and auto-discovery + +### E Goal + +Support multiple source clusters, auto-discover ACS cluster IDs, merge SSBs across clusters. + +### E Requirement IDs + +- `IMP-CLI-003`, `IMP-CLI-027` +- `IMP-MAP-016..021` +- `IMP-ACC-015..017` + +### E Implementation targets (suggested) + +- `scripts/compliance-operator-importer/internal/config/config.go` (--context filter) +- `scripts/compliance-operator-importer/internal/discover/discover.go` (new package: ACS cluster ID auto-discovery) +- `scripts/compliance-operator-importer/internal/cofetch/client.go` (multi-context support) +- `scripts/compliance-operator-importer/internal/merge/merge.go` (new package: SSB merging + mismatch detection) +- `scripts/compliance-operator-importer/internal/run/run.go` (orchestrate multi-cluster flow) + +### E Tests to add + +- `internal/discover/discover_test.go` +- `internal/merge/merge_test.go` +- `internal/config/config_test.go` (new flag tests) +- `internal/run/run_test.go` (multi-cluster integration) + +### E Acceptance signal + +- Auto-discovery resolves ACS cluster IDs from admission-control ConfigMap on real clusters. +- SSBs with same name across clusters produce one merged scan config. +- SSBs with same name but different profiles/schedule produce an error. + +### E Agent prompt seed + +- "Implement Slice E: multi-cluster support. Iterate all contexts from merged kubeconfig, auto-discover ACS cluster ID via admission-control ConfigMap (fallback: ClusterVersion, helm-effective-cluster-name), merge SSBs by name across clusters, error on profile/schedule mismatch." + +## Slice F - Overwrite-existing mode (PUT support) + +### F Goal + +Allow importer to update existing ACS scan configs instead of skipping them. + +### F Requirement IDs + +- `IMP-CLI-027`, `IMP-IDEM-008..009`, `IMP-ACC-014` + +### F Implementation targets (suggested) + +- `scripts/compliance-operator-importer/internal/models/models.go` (add UpdateScanConfiguration to ACSClient interface) +- `scripts/compliance-operator-importer/internal/acs/client.go` (implement PUT) +- `scripts/compliance-operator-importer/internal/reconcile/create_only.go` (rename to reconciler.go, add update path) +- `scripts/compliance-operator-importer/internal/config/config.go` (--overwrite-existing flag) + +### F Tests to add + +- `internal/reconcile/reconciler_test.go` (update path tests) +- `internal/acs/client_test.go` (PUT tests) + +### F Acceptance signal + +- With `--overwrite-existing`, existing scan configs are updated via PUT. +- Without the flag, behavior is unchanged (skip + conflict problem). + +### F Agent prompt seed + +- "Implement Slice F: --overwrite-existing flag. Add PUT to ACS client, update reconciler to call PUT when flag is set and scanName exists. Add UpdateScanConfiguration and DeleteScanConfiguration to ACSClient interface." + +## Slice G - End-to-end acceptance and tooling + +### G Goal + +Make real-cluster validation repeatable and scriptable. + +### G Requirement IDs + +- `IMP-ACC-001..017` + +### G Implementation targets (suggested) + +- `scripts/compliance-operator-importer/hack/acceptance-run.sh` +- `scripts/compliance-operator-importer/hack/check-report.sh` + +### G Tests/checks to add + +- lightweight script tests where practical. +- documented manual acceptance evidence for cluster runs. + +### G Acceptance signal + +- all commands/checks in `specs/04-validation-and-acceptance.md` are reproducible. +- include at least one real-cluster proof run against a live ACS endpoint with artifact output. +- multi-cluster and overwrite scenarios tested against real clusters. + +### G Agent prompt seed + +- "Implement Slice G automation helpers for IMP-ACC-001..017 and produce run artifacts paths for dry-run/apply/second-run/multi-cluster/overwrite checks." + +## Slice H - UX conventions -- DONE + +### H Goal + +Ensure all flags and env vars follow consistent conventions. Auth mode is +auto-inferred from available credentials. Endpoint handling prepends `https://` +when no scheme is provided. + +### H Requirement IDs + +- `IMP-CLI-001` +- `IMP-CLI-002` +- `IMP-CLI-013` +- `IMP-CLI-024` +- `IMP-CLI-025` + +## Slice I - Simplify cluster access model + +### I Goal + +Iterate all contexts from the merged kubeconfig by default, with an +opt-in `--context` filter. ACS cluster ID is always auto-discovered. + +### I Requirement IDs + +- `IMP-CLI-003` + +### I Implementation targets + +- `internal/models/models.go` (remove Kubeconfigs, Kubecontexts, ClusterOverrides, ClusterNameLookup, AutoDiscoverClusterID; add Contexts) +- `internal/config/config.go` (remove old flags, add --context, remove classifyClusterValues) +- `internal/run/cluster_source.go` (simplify: always load all contexts, filter by Contexts) +- `internal/cofetch/client.go` (remove NewClientForKubeconfig) +- `cmd/importer/main.go` (simplify: always BuildClusterSources + RunMultiCluster) +- `internal/config/config_test.go` +- `internal/config/config_multicluster_test.go` + +### I Agent prompt seed + +- "Implement Slice I: drop --kubeconfig, --kubecontext, --cluster. Default to all contexts from merged kubeconfig. Add --context (repeatable) as opt-in filter. Always auto-discover ACS cluster ID. Simplify BuildClusterSources and main.go accordingly." + +## Slice J - Container image packaging + +### J Goal + +Package the importer as a minimal, multi-arch container image for distribution. + +### J Requirement IDs + +- `IMP-IMG-001..005` + +### J Implementation targets + +- `scripts/compliance-operator-importer/Dockerfile` +- `scripts/compliance-operator-importer/Makefile` (image, image-multiarch, image-push targets) +- `scripts/compliance-operator-importer/.dockerignore` + +### J Acceptance signal + +- `make image` builds and `docker run --rm $IMAGE --help` prints usage. +- `make image-push` builds per-arch images and creates manifest list. + +## Cross-slice conventions + +- Requirement IDs must appear in test names or comments. +- Keep mapping logic side-effect free where possible. +- Wrap external clients (k8s/ACS) behind interfaces for deterministic tests. +- Never mutate CO resources. +- Guard rail test: without `--overwrite-existing`, no `PUT` is ever sent. +- Verify behavior with real-world examples early and often, not only mocked tests. +- Capture smoke-test commands and outputs in PR notes for traceability. + +## Suggested execution order and ownership + +1. Slice A (platform/entrypoint) -- DONE +2. Slice B (domain mapping) -- DONE +3. Slice C (ACS reconciliation) -- DONE +4. Slice D (reporting + run orchestration) -- DONE +5. Slice E (multi-cluster + auto-discovery) -- DONE +6. Slice F (overwrite-existing / PUT support) -- DONE +7. Slice G (acceptance automation) -- DONE +8. Slice H (UX conventions) -- DONE +9. Slice I (simplify cluster access model) -- DONE +10. Slice J (container image packaging) + +Slices E and F are independent and can be implemented in parallel. +One agent per slice is ideal; if sequential, complete one slice fully before next. diff --git a/scripts/compliance-operator-importer/specs/07-container-image.md b/scripts/compliance-operator-importer/specs/07-container-image.md new file mode 100644 index 0000000000000..3e53da2c706c4 --- /dev/null +++ b/scripts/compliance-operator-importer/specs/07-container-image.md @@ -0,0 +1,59 @@ +# 07 - Container Image Packaging + +This spec defines how the importer is packaged and distributed as a +multi-architecture container image. + +## Design decisions + +- Single-stage build using `ubi9-micro` as base (includes CA certificates). +- Multi-arch: `linux/amd64` and `linux/arm64`. +- Static binary: `CGO_ENABLED=0`, pure Go. +- Non-root: runs as UID `65534` (nobody). + +## Requirements + +### IMP-IMG-001: Dockerfile + +The Dockerfile MUST: +- Use `registry.access.redhat.com/ubi9-micro:latest` as base. +- COPY the pre-compiled binary as `/compliance-operator-importer`. +- Set `USER 65534:65534`. +- Set `ENTRYPOINT ["/compliance-operator-importer"]`. + +### IMP-IMG-002: Static binary + +The Go binary MUST be compiled with: +- `CGO_ENABLED=0` +- `GOOS=linux` +- `GOARCH` set to the target architecture (`amd64` or `arm64`) + +### IMP-IMG-003: Multi-architecture support + +The build MUST: +- Build the Go binary once per target architecture. +- Build a container image per architecture, tagged with an `-$ARCH` suffix + (e.g. `$IMAGE:$TAG-amd64`, `$IMAGE:$TAG-arm64`). +- Create a multi-arch manifest list under the plain tag + (`$IMAGE:$TAG`) combining all architecture-specific images. +- Support at least `linux/amd64` and `linux/arm64`. + +### IMP-IMG-004: Build targets + +The Makefile MUST provide: +- `make image` — build container image for the host architecture. +- `make image-push` — build and push multi-arch images + manifest. +- Image name configurable via `IMAGE` env var with a placeholder default. +- Tag configurable via `TAG` env var (default: `latest`). + +### IMP-IMG-005: Image metadata + +The image MUST include OCI labels: +- `org.opencontainers.image.title=co-acs-importer` +- `org.opencontainers.image.description=Compliance Operator to ACS scan configuration importer` +- `org.opencontainers.image.source=https://github.com/stackrox/stackrox` + +## Non-goals + +- CI/CD pipeline integration (future work). +- Helm chart or operator packaging. +- Signing or SBOM generation (deferred). diff --git a/tools/roxvet/analyzers/validateimports/analyzer.go b/tools/roxvet/analyzers/validateimports/analyzer.go index f13d45fb6a1c7..75c69f3ac2a91 100644 --- a/tools/roxvet/analyzers/validateimports/analyzer.go +++ b/tools/roxvet/analyzers/validateimports/analyzer.go @@ -386,6 +386,12 @@ func verifyImportsFromAllowedPackagesOnly(pass *analysis.Pass, imports []*ast.Im } func run(pass *analysis.Pass) (interface{}, error) { + // Skip packages that belong to a different Go module entirely (e.g. sub-modules + // in the repository whose import path does not start with the rox module prefix). + // validateimports only enforces cross-package import rules within the rox module. + if !strings.HasPrefix(pass.Pkg.Path(), roxPrefix) { + return nil, nil + } root, valid, err := getRoot(pass.Pkg.Path()) if err != nil { pass.Reportf(token.NoPos, "couldn't find valid root: %v", err)