@@ -310,11 +310,13 @@ func (w *deploymentWrap) populateImageMetadata(localImages set.StringSet, pods .
310310 // The downside to this is that if different pods have different versions then we will miss the fact that pods are running
311311 // different versions and clobber it. I've added a log to illustrate the clobbering so we can see how often it happens
312312
313- // Sort the w.Deployment.Containers by name and p.Status.ContainerStatuses by name
314- // This is because the order is not guaranteed
315- sort .SliceStable (w .GetDeployment ().GetContainers (), func (i , j int ) bool {
316- return w .GetDeployment ().GetContainers ()[i ].GetName () < w .GetDeployment ().GetContainers ()[j ].GetName ()
317- })
313+ // Build a map from container name to deployment container for name-based matching.
314+ // This avoids index-based alignment which breaks when other container types are
315+ // present in deployment.Containers but not in pod container statuses.
316+ containersByName := make (map [string ]* storage.Container , len (w .GetDeployment ().GetContainers ()))
317+ for _ , c := range w .GetDeployment ().GetContainers () {
318+ containersByName [c .GetName ()] = c
319+ }
318320
319321 // Sort the pods by time created as that pod will be most likely to have the most updated spec
320322 sort .SliceStable (pods , func (i , j int ) bool {
@@ -323,19 +325,20 @@ func (w *deploymentWrap) populateImageMetadata(localImages set.StringSet, pods .
323325
324326 // Determine each image's ID, if not already populated, as well as if the image is pullable and/or cluster-local.
325327 for _ , p := range pods {
326- sort .SliceStable (p .Status .ContainerStatuses , func (i , j int ) bool {
327- return p .Status .ContainerStatuses [i ].Name < p .Status .ContainerStatuses [j ].Name
328- })
329- sort .SliceStable (p .Spec .Containers , func (i , j int ) bool {
330- return p .Spec .Containers [i ].Name < p .Spec .Containers [j ].Name
331- })
332- for i , c := range p .Status .ContainerStatuses {
333- if i >= len (w .GetDeployment ().GetContainers ()) || i >= len (p .Spec .Containers ) {
334- // This should not happen, but could happen if w.Deployment.Containers and container status are out of sync
335- break
328+ // Build a map from container name to pod spec container for name-based lookup.
329+ specContainersByName := make (map [string ]v1.Container , len (p .Spec .Containers ))
330+ for _ , sc := range p .Spec .Containers {
331+ specContainersByName [sc .Name ] = sc
332+ }
333+
334+ for _ , c := range p .Status .ContainerStatuses {
335+ deployContainer , found := containersByName [c .Name ]
336+ if ! found {
337+ log .Debugf ("Skipping container status %q with no matching deployment container for deploy %q, pod %q" , c .Name , w .GetDeployment ().GetName (), p .GetName ())
338+ continue
336339 }
337340
338- image := w . GetDeployment (). GetContainers ()[ i ] .GetImage ()
341+ image := deployContainer .GetImage ()
339342
340343 var runtimeImageName * storage.ImageName
341344 if features .UnqualifiedSearchRegistries .Enabled () && c .ImageID != "" {
@@ -365,7 +368,12 @@ func (w *deploymentWrap) populateImageMetadata(localImages set.StringSet, pods .
365368 continue
366369 }
367370
368- parsedName , err := imageUtils .GenerateImageFromStringWithOverride (p .Spec .Containers [i ].Image , w .registryOverride )
371+ specContainer , found := specContainersByName [c .Name ]
372+ if ! found {
373+ continue
374+ }
375+
376+ parsedName , err := imageUtils .GenerateImageFromStringWithOverride (specContainer .Image , w .registryOverride )
369377 if err != nil {
370378 // This error will only happen if we could not parse the image, this is possible if the image in kubernetes is malformed
371379 // e.g. us.gcr.io/$PROJECT/xyz:latest is an example that we have seen
0 commit comments