diff --git a/infra/feast-operator/Makefile b/infra/feast-operator/Makefile index a7d0ee95e8e..a2ef6e095da 100644 --- a/infra/feast-operator/Makefile +++ b/infra/feast-operator/Makefile @@ -145,9 +145,9 @@ docker-build: ## Build docker image with the manager. $(CONTAINER_TOOL) build -t ${IMG} . ## Build feast docker image. -.PHONY: feast-docker-build -feast-image-build: - cd ./../.. && VERSION=operator.v0 REGISTRY=example.com make build-feature-transformation-server-docker +.PHONY: feast-ci-dev-docker-img +feast-ci-dev-docker-img: + cd ./../.. && make build-feature-server-dev .PHONY: docker-push diff --git a/infra/feast-operator/README.md b/infra/feast-operator/README.md index 32e2ef11b53..3012eb63d4b 100644 --- a/infra/feast-operator/README.md +++ b/infra/feast-operator/README.md @@ -131,3 +131,28 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + + +## Running End-to-End integration tests in a local (dev) environment +You need a kind cluster to run the e2e tests in a local (dev) environment. + +```shell +# The default kind cluster configuration is not enough to run all the pods. In this example Docker was run via Colima; kind uses the CPU and memory assigned to Docker. +# The memory configuration below worked well, but if you are using another Docker runtime please increase the CPU and memory as needed. +colima start --cpu 10 --memory 15 --disk 100 + +# create the kind cluster +kind create cluster + +# set the kubernetes context to the newly created kind cluster +kubectl cluster-info --context kind-kind + +# run this command from the operator directory to run the e2e tests. +make test-e2e + +# delete the cluster once you are done.
+kind delete cluster +``` + + + diff --git a/infra/feast-operator/test/e2e/e2e_test.go b/infra/feast-operator/test/e2e/e2e_test.go index 896ce3f4910..7d9fb9af056 100644 --- a/infra/feast-operator/test/e2e/e2e_test.go +++ b/infra/feast-operator/test/e2e/e2e_test.go @@ -27,40 +27,26 @@ import ( "github.com/feast-dev/feast/infra/feast-operator/test/utils" ) -const namespace = "feast-operator-system" +const feastControllerNamespace = "feast-operator-system" var _ = Describe("controller", Ordered, func() { BeforeAll(func() { - By("installing prometheus operator") - Expect(utils.InstallPrometheusOperator()).To(Succeed()) - - By("installing the cert-manager") - Expect(utils.InstallCertManager()).To(Succeed()) - By("creating manager namespace") - cmd := exec.Command("kubectl", "create", "ns", namespace) + cmd := exec.Command("kubectl", "create", "ns", feastControllerNamespace) _, _ = utils.Run(cmd) }) AfterAll(func() { - By("uninstalling the Prometheus manager bundle") - utils.UninstallPrometheusOperator() - - By("uninstalling the cert-manager bundle") - utils.UninstallCertManager() - - By("removing manager namespace") - cmd := exec.Command("kubectl", "delete", "ns", namespace) - _, _ = utils.Run(cmd) + //Add any post clean up code here. 
}) Context("Operator", func() { - It("should run successfully", func() { - var controllerPodName string + It("Should be able to deploy and run a default feature store CR successfully", func() { + //var controllerPodName string var err error // projectimage stores the name of the image used in the example - var projectimage = "example.com/feast-operator:v0.0.1" + var projectimage = "localhost/feast-operator:v0.0.1" By("building the manager(Operator) image") cmd := exec.Command("make", "docker-build", fmt.Sprintf("IMG=%s", projectimage)) @@ -72,13 +58,20 @@ var _ = Describe("controller", Ordered, func() { ExpectWithOffset(1, err).NotTo(HaveOccurred()) By("building the feast image") - cmd = exec.Command("make", "feast-image-build") + cmd = exec.Command("make", "feast-ci-dev-docker-img") + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) + // this image will be built in above make target. + var feastImage = "feastdev/feature-server:dev" + var feastLocalImage = "localhost/feastdev/feature-server:dev" + + By("Tag the local feast image for the integration tests") + cmd = exec.Command("docker", "image", "tag", feastImage, feastLocalImage) _, err = utils.Run(cmd) ExpectWithOffset(1, err).NotTo(HaveOccurred()) - var feastImage = "example.com/feature-transformation-server:operator.v0" - By("loading the the feast image on Kind") - err = utils.LoadImageToKindClusterWithName(feastImage) + By("loading the the feast image on Kind cluster") + err = utils.LoadImageToKindClusterWithName(feastLocalImage) ExpectWithOffset(1, err).NotTo(HaveOccurred()) By("installing CRDs") @@ -91,41 +84,88 @@ var _ = Describe("controller", Ordered, func() { _, err = utils.Run(cmd) ExpectWithOffset(1, err).NotTo(HaveOccurred()) - By("validating that the controller-manager pod is running as expected") - verifyControllerUp := func() error { - // Get pod name - - cmd = exec.Command("kubectl", "get", - "pods", "-l", "control-plane=controller-manager", - "-o", "go-template={{ range 
.items }}"+ - "{{ if not .metadata.deletionTimestamp }}"+ - "{{ .metadata.name }}"+ - "{{ \"\\n\" }}{{ end }}{{ end }}", - "-n", namespace, - ) - - podOutput, err := utils.Run(cmd) - ExpectWithOffset(2, err).NotTo(HaveOccurred()) - podNames := utils.GetNonEmptyLines(string(podOutput)) - if len(podNames) != 1 { - return fmt.Errorf("expect 1 controller pods running, but got %d", len(podNames)) - } - controllerPodName = podNames[0] - ExpectWithOffset(2, controllerPodName).Should(ContainSubstring("controller-manager")) - - // Validate pod status - cmd = exec.Command("kubectl", "get", - "pods", controllerPodName, "-o", "jsonpath={.status.phase}", - "-n", namespace, - ) - status, err := utils.Run(cmd) - ExpectWithOffset(2, err).NotTo(HaveOccurred()) - if string(status) != "Running" { - return fmt.Errorf("controller pod in %s status", status) - } - return nil + timeout := 2 * time.Minute + + controllerDeploymentName := "feast-operator-controller-manager" + By("Validating that the controller-manager deployment is in available state") + err = checkIfDeploymentExistsAndAvailable(feastControllerNamespace, controllerDeploymentName, timeout) + Expect(err).To(BeNil(), fmt.Sprintf( + "Deployment %s is not available but expected to be available. 
\nError: %v\n", + controllerDeploymentName, err, + )) + fmt.Printf("Feast Control Manager Deployment %s is available\n", controllerDeploymentName) + + By("deploying the Simple Feast Custom Resource to Kubernetes") + cmd = exec.Command("kubectl", "apply", "-f", + "test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml") + _, cmdOutputerr := utils.Run(cmd) + ExpectWithOffset(1, cmdOutputerr).NotTo(HaveOccurred()) + + namespace := "default" + + deploymentNames := [3]string{"feast-simple-feast-setup-registry", "feast-simple-feast-setup-online", + "feast-simple-feast-setup-offline"} + for _, deploymentName := range deploymentNames { + By(fmt.Sprintf("validate the feast deployment: %s is up and in availability state.", deploymentName)) + err = checkIfDeploymentExistsAndAvailable(namespace, deploymentName, timeout) + Expect(err).To(BeNil(), fmt.Sprintf( + "Deployment %s is not available but expected to be available. \nError: %v\n", + deploymentName, err, + )) + fmt.Printf("Feast Deployment %s is available\n", deploymentName) } - EventuallyWithOffset(1, verifyControllerUp, time.Minute, time.Second).Should(Succeed()) + + By("Check if the feast client - kubernetes config map exists.") + configMapName := "feast-simple-feast-setup-client" + err = checkIfConfigMapExists(namespace, configMapName) + Expect(err).To(BeNil(), fmt.Sprintf( + "config map %s is not available but expected to be available. \nError: %v\n", + configMapName, err, + )) + fmt.Printf("Feast Deployment %s is available\n", configMapName) + + serviceAccountNames := [3]string{"feast-simple-feast-setup-registry", "feast-simple-feast-setup-online", + "feast-simple-feast-setup-offline"} + for _, serviceAccountName := range serviceAccountNames { + By(fmt.Sprintf("validate the feast service account: %s is available.", serviceAccountName)) + err = checkIfServiceAccountExists(namespace, serviceAccountName) + Expect(err).To(BeNil(), fmt.Sprintf( + "Service account %s does not exist in namespace %s. 
Error: %v", + serviceAccountName, namespace, err, + )) + fmt.Printf("Service account %s exists in namespace %s\n", serviceAccountName, namespace) + } + + serviceNames := [3]string{"feast-simple-feast-setup-registry", "feast-simple-feast-setup-online", + "feast-simple-feast-setup-offline"} + for _, serviceName := range serviceNames { + By(fmt.Sprintf("validate the kubernetes service name: %s is available.", serviceName)) + err = checkIfKubernetesServiceExists(namespace, serviceName) + Expect(err).To(BeNil(), fmt.Sprintf( + "kubernetes service %s is not available but expected to be available. \nError: %v\n", + serviceName, err, + )) + fmt.Printf("kubernetes service %s is available\n", serviceName) + } + + By(fmt.Sprintf("Checking FeatureStore customer resource: %s is in Ready Status.", "simple-feast-setup")) + err = checkIfFeatureStoreCustomResourceConditionsInReady("simple-feast-setup", namespace) + Expect(err).To(BeNil(), fmt.Sprintf( + "FeatureStore custom resource %s all conditions are not in ready state. 
\nError: %v\n", + "simple-feast-setup", err, + )) + fmt.Printf("FeatureStore customer resource %s conditions are in Ready State\n", "simple-feast-setup") + + By("deleting the feast deployment") + cmd = exec.Command("kubectl", "delete", "-f", + "test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml") + _, cmdOutputerr = utils.Run(cmd) + ExpectWithOffset(1, cmdOutputerr).NotTo(HaveOccurred()) + + By("Uninstalling the feast CRD") + cmd = exec.Command("kubectl", "delete", "deployment", controllerDeploymentName, "-n", feastControllerNamespace) + _, err = utils.Run(cmd) + ExpectWithOffset(1, err).NotTo(HaveOccurred()) }) }) diff --git a/infra/feast-operator/test/e2e/test_util.go b/infra/feast-operator/test/e2e/test_util.go new file mode 100644 index 00000000000..f30d8cbebf5 --- /dev/null +++ b/infra/feast-operator/test/e2e/test_util.go @@ -0,0 +1,186 @@ +package e2e + +import ( + "bytes" + "encoding/json" + "fmt" + "os/exec" + "strings" + "time" +) + +// dynamically checks if all conditions of custom resource featurestore are in "Ready" state. +func checkIfFeatureStoreCustomResourceConditionsInReady(featureStoreName, namespace string) error { + cmd := exec.Command("kubectl", "get", "featurestore", featureStoreName, "-n", namespace, "-o", "json") + + var out bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to get resource %s in namespace %s. Error: %v. Stderr: %s", + featureStoreName, namespace, err, stderr.String()) + } + + // Parse the JSON into a generic map + var resource map[string]interface{} + if err := json.Unmarshal(out.Bytes(), &resource); err != nil { + return fmt.Errorf("failed to parse the resource JSON. 
Error: %v", err) + } + + // Traverse the JSON structure to extract conditions + status, ok := resource["status"].(map[string]interface{}) + if !ok { + return fmt.Errorf("status field is missing or invalid in the resource JSON") + } + + conditions, ok := status["conditions"].([]interface{}) + if !ok { + return fmt.Errorf("conditions field is missing or invalid in the status section") + } + + // Validate all conditions + for _, condition := range conditions { + conditionMap, ok := condition.(map[string]interface{}) + if !ok { + return fmt.Errorf("invalid condition format") + } + + conditionType := conditionMap["type"].(string) + conditionStatus := conditionMap["status"].(string) + + if conditionStatus != "True" { + return fmt.Errorf(" FeatureStore=%s condition '%s' is not in 'Ready' state. Status: %s", + featureStoreName, conditionType, conditionStatus) + } + } + + return nil +} + +// validates if a deployment exists and also in the availability state as True. +func checkIfDeploymentExistsAndAvailable(namespace string, deploymentName string, timeout time.Duration) error { + var output, errOutput bytes.Buffer + + ticker := time.NewTicker(2 * time.Second) + defer ticker.Stop() + + timeoutChan := time.After(timeout) + + for { + select { + case <-timeoutChan: + return fmt.Errorf("timed out waiting for deployment %s to become available", deploymentName) + case <-ticker.C: + // Run kubectl command + cmd := exec.Command("kubectl", "get", "deployment", deploymentName, "-n", namespace, "-o", "json") + cmd.Stdout = &output + cmd.Stderr = &errOutput + + if err := cmd.Run(); err != nil { + // Log error and retry + fmt.Printf("Deployment not yet found, we may try again to find the updated status: %s\n", errOutput.String()) + continue + } + + // Parse the JSON output into a map + var result map[string]interface{} + if err := json.Unmarshal(output.Bytes(), &result); err != nil { + return fmt.Errorf("failed to parse deployment JSON: %v", err) + } + + // Navigate to status.conditions 
+ status, ok := result["status"].(map[string]interface{}) + if !ok { + return fmt.Errorf("failed to get status field from deployment JSON") + } + + conditions, ok := status["conditions"].([]interface{}) + if !ok { + return fmt.Errorf("failed to get conditions field from deployment JSON") + } + + // Check for Available condition + for _, condition := range conditions { + cond, ok := condition.(map[string]interface{}) + if !ok { + continue + } + if cond["type"] == "Available" && cond["status"] == "True" { + return nil // Deployment is available + } + } + + // Reset buffers for the next loop iteration + output.Reset() + errOutput.Reset() + } + } +} + +// validates if a service account exists using the kubectl CLI. +func checkIfServiceAccountExists(namespace, saName string) error { + cmd := exec.Command("kubectl", "get", "sa", saName, "-n", namespace) + + var out bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to find service account %s in namespace %s. Error: %v. Stderr: %s", + saName, namespace, err, stderr.String()) + } + + // Check the output to confirm presence + if !strings.Contains(out.String(), saName) { + return fmt.Errorf("service account %s not found in namespace %s", saName, namespace) + } + + return nil +} + +// validates if a config map exists using the kubectl CLI. +func checkIfConfigMapExists(namespace, configMapName string) error { + cmd := exec.Command("kubectl", "get", "cm", configMapName, "-n", namespace) + + var out bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to find config map %s in namespace %s. Error: %v. 
Stderr: %s", + configMapName, namespace, err, stderr.String()) + } + + // Check the output to confirm presence + if !strings.Contains(out.String(), configMapName) { + return fmt.Errorf("config map %s not found in namespace %s", configMapName, namespace) + } + + return nil +} + +// validates if a kubernetes service exists using the kubectl CLI. +func checkIfKubernetesServiceExists(namespace, serviceName string) error { + cmd := exec.Command("kubectl", "get", "service", serviceName, "-n", namespace) + + var out bytes.Buffer + var stderr bytes.Buffer + cmd.Stdout = &out + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to find kubernetes service %s in namespace %s. Error: %v. Stderr: %s", + serviceName, namespace, err, stderr.String()) + } + + // Check the output to confirm presence + if !strings.Contains(out.String(), serviceName) { + return fmt.Errorf("kubernetes service %s not found in namespace %s", serviceName, namespace) + } + + return nil +} diff --git a/infra/feast-operator/test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml b/infra/feast-operator/test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml new file mode 100644 index 00000000000..0252a5fecf5 --- /dev/null +++ b/infra/feast-operator/test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml @@ -0,0 +1,14 @@ +apiVersion: feast.dev/v1alpha1 +kind: FeatureStore +metadata: + name: simple-feast-setup +spec: + feastProject: my_project + services: + onlineStore: + image: 'localhost/feastdev/feature-server:dev' + offlineStore: + image: 'localhost/feastdev/feature-server:dev' + registry: + local: + image: 'localhost/feastdev/feature-server:dev'