diff --git a/CHANGELOG.md b/CHANGELOG.md index 2725fb643e2..3607aec6195 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,34 @@ # Changelog +# [0.58.0](https://github.com/feast-dev/feast/compare/v0.57.0...v0.58.0) (2025-12-16) + + +### Bug Fixes + +* Add java proto ([#5719](https://github.com/feast-dev/feast/issues/5719)) ([fc3ea20](https://github.com/feast-dev/feast/commit/fc3ea208e0d37d1083fda261fd374c87fa4da17c)) +* Add possibility to force full features names for materialize ops ([#5728](https://github.com/feast-dev/feast/issues/5728)) ([55c9c36](https://github.com/feast-dev/feast/commit/55c9c36a5cc385869d2c7671a5629da1ba282d47)) +* Fixed file registry cache sync ([09505d4](https://github.com/feast-dev/feast/commit/09505d4ae79f5ca5b452c9c5e41a56e1f7d3d136)) +* Handle hyphon in sqlite project name ([#5575](https://github.com/feast-dev/feast/issues/5575)) ([#5749](https://github.com/feast-dev/feast/issues/5749)) ([b8346ff](https://github.com/feast-dev/feast/commit/b8346ff58c0d78deff52b2a864772dc23741ea75)) +* Pinned substrait to fix protobuf issue ([d0ef4da](https://github.com/feast-dev/feast/commit/d0ef4da05dc91893b14b10b888b3c458fd389ce5)) +* Set TLS certificate annotation only on gRPC service ([#5715](https://github.com/feast-dev/feast/issues/5715)) ([75d13db](https://github.com/feast-dev/feast/commit/75d13db1ea536bd4759dbb719190a266b6466b24)) +* SQLite online store deletes tables from other projects in shared registry scenarios ([#5766](https://github.com/feast-dev/feast/issues/5766)) ([fabce76](https://github.com/feast-dev/feast/commit/fabce765804a8577ef31fb83525adc5df5748c44)) +* Validate not existing entity join keys for preventing panic ([0b93559](https://github.com/feast-dev/feast/commit/0b935595b7e296f1d51a52c42b750a470ea10ceb)) + + +### Features + +* Add annotations for pod templates ([534e647](https://github.com/feast-dev/feast/commit/534e647f40567a8d632fb562f1e58057b1c61c3e)) +* Add Pytorch template ([#5780](https://github.com/feast-dev/feast/issues/5780)) ([6afd353](https://github.com/feast-dev/feast/commit/6afd353425027fe7de561092306c390ea1ad0d19)) +* Add support for extra options for stream source ([#5618](https://github.com/feast-dev/feast/issues/5618)) ([18956c2](https://github.com/feast-dev/feast/commit/18956c2765ebc4c92ca8afc7e02aca8b7dfc339d)) +* Added matched_tag field search api results with fuzzy search capabilities ([#5769](https://github.com/feast-dev/feast/issues/5769)) ([4a9ffae](https://github.com/feast-dev/feast/commit/4a9ffae3d2c7449670d9dddb8b3b341ee00ffc29)) +* Added support for enabling metrics in Feast Operator ([#5317](https://github.com/feast-dev/feast/issues/5317)) ([#5748](https://github.com/feast-dev/feast/issues/5748)) ([a8498c2](https://github.com/feast-dev/feast/commit/a8498c2c43439024cd9516eae8e1733b3f69b577)) +* Configure CacheTTLSecondscache,CacheMode for file-based registry in Feast Operator([#5708](https://github.com/feast-dev/feast/issues/5708)) ([#5744](https://github.com/feast-dev/feast/issues/5744)) ([f25f83b](https://github.com/feast-dev/feast/commit/f25f83b3e4d324a9b52cd2e90036c3d25f25ee80)) +* Implemented Tiling Support for Time-Windowed Aggregations ([#5724](https://github.com/feast-dev/feast/issues/5724)) ([7a99166](https://github.com/feast-dev/feast/commit/7a991660817ffcbe3059b0141f42f15e5f7d5b04)) +* Offline Store historical features retrieval based on datetime range for spark ([#5720](https://github.com/feast-dev/feast/issues/5720)) 
([27ec8ec](https://github.com/feast-dev/feast/commit/27ec8ec26b2884d09df3ee3e648b96824e1f2b4c)) +* Offline Store historical features retrieval based on datetime range in dask ([#5717](https://github.com/feast-dev/feast/issues/5717)) ([a16582a](https://github.com/feast-dev/feast/commit/a16582a82914b015095f60a61a3a722c9c82bb63)) +* Production ready feast operator with v1 apiversion ([#5771](https://github.com/feast-dev/feast/issues/5771)) ([49359c6](https://github.com/feast-dev/feast/commit/49359c6227601bc31ee08fdc4f4208fb6d101755)) +* Support for Map value data type ([#5768](https://github.com/feast-dev/feast/issues/5768)) ([#5772](https://github.com/feast-dev/feast/issues/5772)) ([b99a8a9](https://github.com/feast-dev/feast/commit/b99a8a9aeafba912a6ceea8658f5efeae90b245d)) + # [0.57.0](https://github.com/feast-dev/feast/compare/v0.56.0...v0.57.0) (2025-11-13) diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index c06c3398519..7d85aba1ad0 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -68,6 +68,7 @@ * [Scaling Feast](how-to-guides/scaling-feast.md) * [Structuring Feature Repos](how-to-guides/structuring-repos.md) * [Running Feast in production (e.g. on Kubernetes)](how-to-guides/running-feast-in-production.md) +* [Feast on Kubernetes](how-to-guides/feast-on-kubernetes.md) * [Customizing Feast](how-to-guides/customizing-feast/README.md) * [Adding a custom batch materialization engine](how-to-guides/customizing-feast/creating-a-custom-materialization-engine.md) * [Adding a new offline store](how-to-guides/customizing-feast/adding-a-new-offline-store.md) @@ -157,6 +158,7 @@ * [Registry server](reference/feature-servers/registry-server.md) * [\[Beta\] Web UI](reference/alpha-web-ui.md) * [\[Beta\] On demand feature view](reference/beta-on-demand-feature-view.md) +* [\[Alpha\] Static Artifacts Loading](reference/alpha-static-artifacts.md) * [\[Alpha\] Vector Database](reference/alpha-vector-database.md) * [\[Alpha\] Data quality monitoring](reference/dqm.md) * [\[Alpha\] Streaming feature computation with Denormalized](reference/denormalized.md) diff --git a/docs/getting-started/quickstart.md b/docs/getting-started/quickstart.md index 0caba1f7d60..0dcb861db7c 100644 --- a/docs/getting-started/quickstart.md +++ b/docs/getting-started/quickstart.md @@ -97,7 +97,7 @@ Creating a new Feast repository in /home/Jovyan/my_project. Let's take a look at the resulting demo repo itself. It breaks down into * `data/` contains raw demo parquet data -* `example_repo.py` contains demo feature definitions +* `feature_definitions.py` contains demo feature definitions * `feature_store.yaml` contains a demo setup configuring where data sources are * `test_workflow.py` showcases how to run all key Feast commands, including defining, retrieving, and pushing features. You can run this with `python test_workflow.py`. @@ -117,7 +117,7 @@ entity_key_serialization_version: 3 ``` {% endtab %} -{% tab title="example_repo.py" %} +{% tab title="feature_definitions.py" %} ```python # This is an example feature definition file @@ -310,7 +310,7 @@ We'll walk through some snippets of code below and explain ### Step 4: Register feature definitions and deploy your feature store The `apply` command scans python files in the current directory for feature view/entity definitions, registers the -objects, and deploys infrastructure. In this example, it reads `example_repo.py` and sets up SQLite online store tables. Note that we had specified SQLite as the default online store by +objects, and deploys infrastructure. 
In this example, it reads `feature_definitions.py` and sets up SQLite online store tables. Note that we had specified SQLite as the default online store by configuring `online_store` in `feature_store.yaml`. {% tabs %} @@ -496,7 +496,7 @@ print(training_df.head()) {% endtabs %} ### Step 6: Ingest batch features into your online store -We now serialize the latest values of features since the beginning of time to prepare for serving. Note, `materialize_incremental` serializes all new features since the last `materialize` call, or since the time provided minus the `ttl` timedelta. In this case, this will be `CURRENT_TIME - 1 day` (`ttl` was set on the `FeatureView` instances in [feature_repo/feature_repo/example_repo.py](feature_repo/feature_repo/example_repo.py)). +We now serialize the latest values of features since the beginning of time to prepare for serving. Note, `materialize_incremental` serializes all new features since the last `materialize` call, or since the time provided minus the `ttl` timedelta. In this case, this will be `CURRENT_TIME - 1 day` (`ttl` was set on the `FeatureView` instances in [feature_repo/feature_repo/feature_definitions.py](feature_repo/feature_repo/feature_definitions.py)). {% tabs %} {% tab title="Bash (with timestamp)" %} diff --git a/docs/how-to-guides/feast-on-kubernetes.md b/docs/how-to-guides/feast-on-kubernetes.md new file mode 100644 index 00000000000..5504dbd671a --- /dev/null +++ b/docs/how-to-guides/feast-on-kubernetes.md @@ -0,0 +1,71 @@ +# Feast on Kubernetes + +This page covers deploying Feast on Kubernetes, including the Feast Operator and feature servers. + +## Overview + +Kubernetes is a common target environment for running Feast in production. You can use Kubernetes to: + +1. Run Feast feature servers for online feature retrieval. +2. Run scheduled and ad-hoc jobs (e.g. materialization jobs) as Kubernetes Jobs. +3. Operate Feast components using Kubernetes-native primitives. + +## Feast Operator + +To deploy Feast components on Kubernetes, use the included [feast-operator](../../infra/feast-operator). + +For first-time Operator users, it may be a good exercise to try the [Feast Operator Quickstart](../../examples/operator-quickstart). The quickstart demonstrates some of the Operator's built-in features, e.g. git repos, `feast apply` jobs, etc. + +## Deploy Feast feature servers on Kubernetes + +{% embed url="https://www.youtube.com/playlist?list=PLPzVNzik7rsAN-amQLZckd0so3cIr7blX" %} + +**Basic steps** + +1. Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) +2. Install the Operator + +Install the latest release: + +```sh +kubectl apply -f https://raw.githubusercontent.com/feast-dev/feast/refs/heads/stable/infra/feast-operator/dist/install.yaml +``` + +OR, install a specific version: + +```sh +kubectl apply -f https://raw.githubusercontent.com/feast-dev/feast/refs/tags//infra/feast-operator/dist/install.yaml +``` + +3. Deploy a Feature Store + +```sh +kubectl apply -f https://raw.githubusercontent.com/feast-dev/feast/refs/heads/stable/infra/feast-operator/config/samples/v1_featurestore.yaml +``` + +Verify the status: + +```sh +$ kubectl get feast +NAME STATUS AGE +sample Ready 2m21s +``` + +The above will install a simple [FeatureStore CR](../../infra/feast-operator/docs/api/markdown/ref.md) like the following. 
By default, it will run the [Online Store feature server](../reference/feature-servers/python-feature-server.md): + +```yaml +apiVersion: feast.dev/v1 +kind: FeatureStore +metadata: + name: sample +spec: + feastProject: my_project +``` + +> _More advanced FeatureStore CR examples can be found in the feast-operator [samples directory](../../infra/feast-operator/config/samples)._ + +{% hint style="success" %} +Important note: Scaling a Feature Store Deployment should only be done if the configured data store(s) will support it. + +Please check the how-to guide for some specific recommendations on [how to scale Feast](./scaling-feast.md). +{% endhint %} diff --git a/docs/how-to-guides/running-feast-in-production.md b/docs/how-to-guides/running-feast-in-production.md index 8e256995108..be6fd2afeb4 100644 --- a/docs/how-to-guides/running-feast-in-production.md +++ b/docs/how-to-guides/running-feast-in-production.md @@ -204,54 +204,7 @@ feature_vector = fs.get_online_features( ``` ### 4.2. Deploy Feast feature servers on Kubernetes - -To deploy a Feast feature server on Kubernetes, you should use the included [feast-operator](../../infra/feast-operator). - -{% embed url="https://www.youtube.com/playlist?list=PLPzVNzik7rsAN-amQLZckd0so3cIr7blX" %} - -**Basic steps** -1. Install [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/) -2. Install the Operator - - -Install the latest release -```sh -kubectl apply -f https://raw.githubusercontent.com/feast-dev/feast/refs/heads/stable/infra/feast-operator/dist/install.yaml -``` - -OR, install a specific version - -``` -kubectl apply -f https://raw.githubusercontent.com/feast-dev/feast/refs/tags//infra/feast-operator/dist/install.yaml -``` - -3. Deploy a Feature Store - -```sh -kubectl apply -f https://raw.githubusercontent.com/feast-dev/feast/refs/heads/stable/infra/feast-operator/config/samples/v1_featurestore.yaml -``` -Verify the status -``` -$ kubectl get feast -NAME STATUS AGE -sample Ready 2m21s -``` - -The above will install a simple [FeatureStore CR](../../infra/feast-operator/docs/api/markdown/ref.md) like the following. By default, it will run the [Online Store feature server](../reference/feature-servers/python-feature-server.md) - -```yaml -apiVersion: feast.dev/v1 -kind: FeatureStore -metadata: - name: sample -spec: - feastProject: my_project -``` -> _More advanced FeatureStore CR examples can be found in the feast-operator [samples directory](../../infra/feast-operator/config/samples)._ - -For first-time Operator users, it may be a good exercise to try the [Feast Operator Quickstart](../../examples/operator-quickstart). The quickstart will demonstrate some of the Operator's built-in features, e.g. git repos, `feast apply` jobs, etc. - -{% hint style="success" %} Important note: Scaling a Feature Store Deployment should only be done if the configured data store(s) will support it. - -Please check the how-to guide for some specific recommendations on [how to scale Feast](./scaling-feast.md). {% endhint %} +See [Feast on Kubernetes](./feast-on-kubernetes.md). ## 5. Using environment variables in your yaml configuration diff --git a/docs/reference/alpha-static-artifacts.md b/docs/reference/alpha-static-artifacts.md new file mode 100644 index 00000000000..627898f847f --- /dev/null +++ b/docs/reference/alpha-static-artifacts.md @@ -0,0 +1,314 @@ +# [Alpha] Static Artifacts Loading + +**Warning**: This is an experimental feature. To our knowledge, this is stable, but there are still rough edges in the experience. 
Contributions are welcome! + +## Overview + +Static Artifacts Loading allows you to load models, lookup tables, and other static resources once during feature server startup instead of loading them on each request. These artifacts are cached in memory and accessible to on-demand feature views for real-time inference. + +This feature optimizes the performance of on-demand feature views that require external resources by eliminating the overhead of repeatedly loading the same artifacts during request processing. + +### Why Use Static Artifacts Loading? + +Static artifacts loading enables data scientists and ML engineers to: + +1. **Improve performance**: Eliminate model loading overhead from each feature request +2. **Enable complex transformations**: Use pre-trained models in on-demand feature views without performance penalties +3. **Share resources**: Multiple feature views can access the same loaded artifacts +4. **Simplify deployment**: Package models and lookup tables with your feature repository + +Common use cases include: +- Sentiment analysis using pre-trained transformers models +- Text classification with small neural networks +- Lookup-based transformations using static dictionaries +- Embedding generation with pre-computed vectors + +## How It Works + +1. **Feature Repository Setup**: Create a `static_artifacts.py` file in your feature repository root +2. **Server Startup**: When `feast serve` starts, it automatically looks for and loads the artifacts +3. **Memory Storage**: Artifacts are stored in the FastAPI application state and accessible via global references +4. **Request Processing**: On-demand feature views access pre-loaded artifacts for fast transformations + +## Example 1: Basic Model Loading + +Create a `static_artifacts.py` file in your feature repository: + +```python +# static_artifacts.py +from fastapi import FastAPI +from transformers import pipeline + +def load_sentiment_model(): + """Load sentiment analysis model.""" + return pipeline( + "sentiment-analysis", + model="cardiffnlp/twitter-roberta-base-sentiment-latest", + device="cpu" + ) + +def load_artifacts(app: FastAPI): + """Load static artifacts into app.state.""" + app.state.sentiment_model = load_sentiment_model() + + # Update global references for access from feature views + import example_repo + example_repo._sentiment_model = app.state.sentiment_model +``` + +Use the pre-loaded model in your on-demand feature view: + +```python +# example_repo.py +import pandas as pd +from feast.on_demand_feature_view import on_demand_feature_view +from feast import Field +from feast.types import String, Float32 + +# Global reference for static artifacts +_sentiment_model = None + +@on_demand_feature_view( + sources=[text_input_request], + schema=[ + Field(name="predicted_sentiment", dtype=String), + Field(name="sentiment_confidence", dtype=Float32), + ], +) +def sentiment_prediction(inputs: pd.DataFrame) -> pd.DataFrame: + """Sentiment prediction using pre-loaded model.""" + global _sentiment_model + + results = [] + for text in inputs["input_text"]: + predictions = _sentiment_model(text) + best_pred = max(predictions, key=lambda x: x["score"]) + + results.append({ + "predicted_sentiment": best_pred["label"], + "sentiment_confidence": best_pred["score"], + }) + + return pd.DataFrame(results) +``` + +## Example 2: Multiple Artifacts with Lookup Tables + +Load multiple types of artifacts: + +```python +# static_artifacts.py +from fastapi import FastAPI +from transformers import pipeline +import json +from pathlib 
import Path + +def load_sentiment_model(): + """Load sentiment analysis model.""" + return pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english") + +def load_lookup_tables(): + """Load static lookup tables.""" + return { + "sentiment_labels": {"NEGATIVE": "negative", "POSITIVE": "positive"}, + "domain_categories": {"twitter.com": "social", "news.com": "news", "github.com": "tech"}, + "priority_users": {"user_123", "user_456", "user_789"} + } + +def load_config(): + """Load application configuration.""" + return { + "model_threshold": 0.7, + "max_text_length": 512, + "default_sentiment": "neutral" + } + +def load_artifacts(app: FastAPI): + """Load all static artifacts.""" + app.state.sentiment_model = load_sentiment_model() + app.state.lookup_tables = load_lookup_tables() + app.state.config = load_config() + + # Update global references + import example_repo + example_repo._sentiment_model = app.state.sentiment_model + example_repo._lookup_tables = app.state.lookup_tables + example_repo._config = app.state.config +``` + +Use multiple artifacts in feature transformations: + +```python +# example_repo.py +import pandas as pd +from feast.on_demand_feature_view import on_demand_feature_view + +# Global references for static artifacts +_sentiment_model = None +_lookup_tables: dict = {} +_config: dict = {} + +@on_demand_feature_view( + sources=[text_input_request, user_input_request], + schema=[ + Field(name="predicted_sentiment", dtype=String), + Field(name="is_priority_user", dtype=Bool), + Field(name="domain_category", dtype=String), + ], +) +def enriched_prediction(inputs: pd.DataFrame) -> pd.DataFrame: + """Multi-artifact feature transformation.""" + global _sentiment_model, _lookup_tables, _config + + results = [] + for i, row in inputs.iterrows(): + text = row["input_text"] + user_id = row["user_id"] + domain = row.get("domain", "") + + # Use pre-loaded model + predictions = _sentiment_model(text) + sentiment_scores = {pred["label"]: pred["score"] for pred in predictions} + + # Use lookup tables + predicted_sentiment = _lookup_tables["sentiment_labels"].get( + max(sentiment_scores, key=sentiment_scores.get), + _config["default_sentiment"] + ) + + is_priority = user_id in _lookup_tables["priority_users"] + category = _lookup_tables["domain_categories"].get(domain, "unknown") + + results.append({ + "predicted_sentiment": predicted_sentiment, + "is_priority_user": is_priority, + "domain_category": category, + }) + + return pd.DataFrame(results) +``` + +## Container Deployment + +Static artifacts work with containerized deployments. Include your artifacts in the container image: + +```dockerfile +FROM python:3.11-slim + +# Install dependencies +COPY requirements.txt . +RUN pip install -r requirements.txt + +# Copy feature repository including static_artifacts.py +COPY feature_repo/ /app/feature_repo/ + +WORKDIR /app/feature_repo + +# Start feature server +CMD ["feast", "serve", "--host", "0.0.0.0"] +``` + +The server will automatically load static artifacts during container startup. 
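+
+To sanity-check the image locally, you can build and run it and confirm the static artifact loading messages appear in the startup logs (a minimal sketch: the image tag is illustrative, and port 6566 is assumed to be the feature server's default):
+
+```sh
+# Build the image from the Dockerfile above and run it, exposing the feature server port
+docker build -t my-feature-server .
+docker run -p 6566:6566 my-feature-server
+```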
+ +## Supported Artifact Types + +### Recommended Artifacts +- **Small ML models**: Sentiment analysis, text classification, small neural networks +- **Lookup tables**: Label mappings, category dictionaries, user segments +- **Configuration data**: Model parameters, feature mappings, business rules +- **Pre-computed embeddings**: User vectors, item features, static representations + +### Not Recommended +- **Large Language Models**: Use dedicated serving solutions (vLLM, TensorRT-LLM, TGI) +- **Models requiring specialized hardware**: GPU clusters, TPUs +- **Frequently updated models**: Consider model registries with versioning +- **Large datasets**: Use feature views with proper data sources instead + +## Error Handling + +Static artifacts loading includes graceful error handling: +- **Missing file**: Server starts normally without static artifacts +- **Loading errors**: Warnings are logged, feature views should implement fallback logic +- **Partial failures**: Successfully loaded artifacts remain available + +Always implement fallback behavior in your feature transformations: + +```python +@on_demand_feature_view(...) +def robust_prediction(inputs: pd.DataFrame) -> pd.DataFrame: + global _sentiment_model + + results = [] + for text in inputs["input_text"]: + if _sentiment_model is not None: + # Use pre-loaded model + predictions = _sentiment_model(text) + sentiment = max(predictions, key=lambda x: x["score"])["label"] + else: + # Fallback when artifacts aren't available + sentiment = "neutral" + + results.append({"predicted_sentiment": sentiment}) + + return pd.DataFrame(results) +``` + +## Starting the Feature Server + +Start the feature server as usual: + +```bash +feast serve +``` + +You'll see log messages indicating artifact loading: + +``` +INFO:fastapi:Loading static artifacts from static_artifacts.py +INFO:fastapi:Static artifacts loading completed +INFO:uvicorn:Application startup complete +``` + +## Template Example + +The PyTorch NLP template demonstrates static artifacts loading: + +```bash +feast init my-nlp-project -t pytorch_nlp +cd my-nlp-project/feature_repo +feast serve +``` + +This template includes a complete example with sentiment analysis model loading, lookup tables, and integration with on-demand feature views. + +## Performance Considerations + +- **Startup time**: Artifacts are loaded during server initialization, which may increase startup time +- **Memory usage**: All artifacts remain in memory for the server's lifetime +- **Concurrency**: Artifacts are shared across all request threads +- **Container resources**: Ensure sufficient memory allocation for your artifacts + +## Configuration + +Currently, static artifacts loading uses convention-based configuration: +- **File name**: Must be named `static_artifacts.py` +- **Location**: Must be in the feature repository root directory +- **Function name**: Must implement `load_artifacts(app: FastAPI)` function + +## Limitations + +- File name and location are currently fixed (not configurable) +- Artifacts are loaded synchronously during startup +- No built-in artifact versioning or hot reloading +- Limited to Python-based artifacts (no external binaries) + +## Contributing + +This is an alpha feature and we welcome contributions! 
Areas for improvement:
+- Configurable artifact file locations
+- Asynchronous artifact loading
+- Built-in artifact versioning
+- Performance monitoring and metrics
+- Integration with model registries
+
+Please report issues and contribute improvements via the [Feast GitHub repository](https://github.com/feast-dev/feast).
\ No newline at end of file
diff --git a/docs/reference/feature-servers/python-feature-server.md b/docs/reference/feature-servers/python-feature-server.md
index f8e121ad6af..df0b0b1f78d 100644
--- a/docs/reference/feature-servers/python-feature-server.md
+++ b/docs/reference/feature-servers/python-feature-server.md
@@ -268,6 +268,48 @@ To start the feature server in TLS mode, you need to provide the private and pub
 feast serve --key /path/to/key.pem --cert /path/to/cert.pem
 ```
 
+# [Alpha] Static Artifacts Loading
+
+**Warning**: This is an experimental feature. To our knowledge, this is stable, but there are still rough edges in the experience.
+
+Static artifacts loading allows you to load models, lookup tables, and other static resources once during feature server startup instead of loading them on each request. This improves performance for on-demand feature views that require external resources.
+
+## Quick Example
+
+Create a `static_artifacts.py` file in your feature repository:
+
+```python
+# static_artifacts.py
+from fastapi import FastAPI
+from transformers import pipeline
+
+def load_artifacts(app: FastAPI):
+    """Load static artifacts into app.state."""
+    app.state.sentiment_model = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
+
+    # Update global references for access from feature views
+    import example_repo
+    example_repo._sentiment_model = app.state.sentiment_model
+```
+
+Access pre-loaded artifacts in your on-demand feature views:
+
+```python
+# example_repo.py
+import pandas as pd
+
+_sentiment_model = None
+
+@on_demand_feature_view(...)
+def sentiment_prediction(inputs: pd.DataFrame) -> pd.DataFrame:
+    global _sentiment_model
+    # Wrap the pipeline output in a DataFrame so the result matches the declared return type
+    predictions = _sentiment_model(list(inputs["text"]))
+    return pd.DataFrame({"predicted_sentiment": [p["label"] for p in predictions]})
+```
+
+## Documentation
+
+For comprehensive documentation, examples, and best practices, see the [Alpha Static Artifacts Loading](../alpha-static-artifacts.md) reference guide.
+
+The [PyTorch NLP template](https://github.com/feast-dev/feast/tree/main/sdk/python/feast/templates/pytorch_nlp) provides a complete working example.
+
 # Online Feature Server Permissions and Access Control
 
 ## API Endpoints and Permissions
diff --git a/docs/reference/online-stores/dragonfly.md b/docs/reference/online-stores/dragonfly.md
index bcd814ecc45..723761ed3bb 100644
--- a/docs/reference/online-stores/dragonfly.md
+++ b/docs/reference/online-stores/dragonfly.md
@@ -48,7 +48,7 @@ There are several options available to get Dragonfly up and running quickly. We
 
 `feast apply`
 
-The `apply` command scans python files in the current directory (`example_repo.py` in this case) for feature view/entity definitions, registers the objects, and deploys infrastructure.
+The `apply` command scans python files in the current directory (`feature_definitions.py` in this case) for feature view/entity definitions, registers the objects, and deploys infrastructure.
You should see the following output: ``` diff --git a/examples/rhoai-quickstart/feast-demo-quickstart.ipynb b/examples/rhoai-quickstart/feast-demo-quickstart.ipynb index 8777798e49f..0e6eab794db 100644 --- a/examples/rhoai-quickstart/feast-demo-quickstart.ipynb +++ b/examples/rhoai-quickstart/feast-demo-quickstart.ipynb @@ -230,7 +230,7 @@ "id": "cd23ce19-03fa-4353-86d3-261c7f514fd8", "metadata": {}, "source": [ - "File `data/driver_stats.parquet` is generated by the `feast init` command and it acts a historical information source to this example. We have defined this source in the [my_feast_project/feature_repo/example_repo.py](./my_feast_project/feature_repo/example_repo.py) file.\n", + "File `data/driver_stats.parquet` is generated by the `feast init` command and it acts a historical information source to this example. We have defined this source in the [my_feast_project/feature_repo/feature_definitions.py](./my_feast_project/feature_repo/feature_definitions.py) file.\n", "\n", "```python\n", "driver_stats_source = FileSource(\n", diff --git a/infra/charts/feast-feature-server/Chart.yaml b/infra/charts/feast-feature-server/Chart.yaml index 70f98bf8190..b0d73fd0b81 100644 --- a/infra/charts/feast-feature-server/Chart.yaml +++ b/infra/charts/feast-feature-server/Chart.yaml @@ -2,7 +2,7 @@ apiVersion: v2 name: feast-feature-server description: Feast Feature Server in Go or Python type: application -version: 0.57.0 +version: 0.58.0 keywords: - machine learning - big data diff --git a/infra/charts/feast-feature-server/README.md b/infra/charts/feast-feature-server/README.md index 48dcac6904f..10b8bb63537 100644 --- a/infra/charts/feast-feature-server/README.md +++ b/infra/charts/feast-feature-server/README.md @@ -1,6 +1,6 @@ # Feast Python / Go Feature Server Helm Charts -Current chart version is `0.57.0` +Current chart version is `0.58.0` ## Installation @@ -42,7 +42,7 @@ See [here](https://github.com/feast-dev/feast/tree/master/examples/python-helm-d | fullnameOverride | string | `""` | | | image.pullPolicy | string | `"IfNotPresent"` | | | image.repository | string | `"quay.io/feastdev/feature-server"` | Docker image for Feature Server repository | -| image.tag | string | `"0.57.0"` | The Docker image tag (can be overwritten if custom feature server deps are needed for on demand transforms) | +| image.tag | string | `"0.58.0"` | The Docker image tag (can be overwritten if custom feature server deps are needed for on demand transforms) | | imagePullSecrets | list | `[]` | | | livenessProbe.initialDelaySeconds | int | `30` | | | livenessProbe.periodSeconds | int | `30` | | diff --git a/infra/charts/feast-feature-server/values.yaml b/infra/charts/feast-feature-server/values.yaml index 452a4579f5c..ec80023eead 100644 --- a/infra/charts/feast-feature-server/values.yaml +++ b/infra/charts/feast-feature-server/values.yaml @@ -9,7 +9,7 @@ image: repository: quay.io/feastdev/feature-server pullPolicy: IfNotPresent # image.tag -- The Docker image tag (can be overwritten if custom feature server deps are needed for on demand transforms) - tag: 0.57.0 + tag: 0.58.0 logLevel: "WARNING" # Set log level DEBUG, INFO, WARNING, ERROR, and CRITICAL (case-insensitive) diff --git a/infra/charts/feast/Chart.yaml b/infra/charts/feast/Chart.yaml index d5c4ed68f1b..b28ac762e40 100644 --- a/infra/charts/feast/Chart.yaml +++ b/infra/charts/feast/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 description: Feature store for machine learning name: feast -version: 0.57.0 +version: 0.58.0 keywords: - machine learning - big 
data diff --git a/infra/charts/feast/README.md b/infra/charts/feast/README.md index 91493736a07..d2260f15f2e 100644 --- a/infra/charts/feast/README.md +++ b/infra/charts/feast/README.md @@ -8,7 +8,7 @@ This repo contains Helm charts for Feast Java components that are being installe ## Chart: Feast -Feature store for machine learning Current chart version is `0.57.0` +Feature store for machine learning Current chart version is `0.58.0` ## Installation @@ -65,8 +65,8 @@ See [here](https://github.com/feast-dev/feast/tree/master/examples/java-demo) fo | Repository | Name | Version | |------------|------|---------| | https://charts.helm.sh/stable | redis | 10.5.6 | -| https://feast-helm-charts.storage.googleapis.com | feature-server(feature-server) | 0.57.0 | -| https://feast-helm-charts.storage.googleapis.com | transformation-service(transformation-service) | 0.57.0 | +| https://feast-helm-charts.storage.googleapis.com | feature-server(feature-server) | 0.58.0 | +| https://feast-helm-charts.storage.googleapis.com | transformation-service(transformation-service) | 0.58.0 | ## Values diff --git a/infra/charts/feast/charts/feature-server/Chart.yaml b/infra/charts/feast/charts/feature-server/Chart.yaml index 99f22582d38..db6162d96bc 100644 --- a/infra/charts/feast/charts/feature-server/Chart.yaml +++ b/infra/charts/feast/charts/feature-server/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v1 description: "Feast Feature Server: Online feature serving service for Feast" name: feature-server -version: 0.57.0 -appVersion: v0.57.0 +version: 0.58.0 +appVersion: v0.58.0 keywords: - machine learning - big data diff --git a/infra/charts/feast/charts/feature-server/README.md b/infra/charts/feast/charts/feature-server/README.md index 21b234dd245..3e8283ac30f 100644 --- a/infra/charts/feast/charts/feature-server/README.md +++ b/infra/charts/feast/charts/feature-server/README.md @@ -1,6 +1,6 @@ # feature-server -![Version: 0.57.0](https://img.shields.io/badge/Version-0.57.0-informational?style=flat-square) ![AppVersion: v0.57.0](https://img.shields.io/badge/AppVersion-v0.57.0-informational?style=flat-square) +![Version: 0.58.0](https://img.shields.io/badge/Version-0.58.0-informational?style=flat-square) ![AppVersion: v0.58.0](https://img.shields.io/badge/AppVersion-v0.58.0-informational?style=flat-square) Feast Feature Server: Online feature serving service for Feast @@ -17,7 +17,7 @@ Feast Feature Server: Online feature serving service for Feast | envOverrides | object | `{}` | Extra environment variables to set | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | | image.repository | string | `"quay.io/feastdev/feature-server-java"` | Docker image for Feature Server repository | -| image.tag | string | `"0.57.0"` | Image tag | +| image.tag | string | `"0.58.0"` | Image tag | | ingress.grpc.annotations | object | `{}` | Extra annotations for the ingress | | ingress.grpc.auth.enabled | bool | `false` | Flag to enable auth | | ingress.grpc.class | string | `"nginx"` | Which ingress controller to use | diff --git a/infra/charts/feast/charts/feature-server/values.yaml b/infra/charts/feast/charts/feature-server/values.yaml index d0a74ad3ba5..2aa4d791598 100644 --- a/infra/charts/feast/charts/feature-server/values.yaml +++ b/infra/charts/feast/charts/feature-server/values.yaml @@ -5,7 +5,7 @@ image: # image.repository -- Docker image for Feature Server repository repository: quay.io/feastdev/feature-server-java # image.tag -- Image tag - tag: 0.57.0 + tag: 0.58.0 # image.pullPolicy -- Image pull policy 
pullPolicy: IfNotPresent diff --git a/infra/charts/feast/charts/transformation-service/Chart.yaml b/infra/charts/feast/charts/transformation-service/Chart.yaml index 83690118ff3..9a343b38526 100644 --- a/infra/charts/feast/charts/transformation-service/Chart.yaml +++ b/infra/charts/feast/charts/transformation-service/Chart.yaml @@ -1,8 +1,8 @@ apiVersion: v1 description: "Transformation service: to compute on-demand features" name: transformation-service -version: 0.57.0 -appVersion: v0.57.0 +version: 0.58.0 +appVersion: v0.58.0 keywords: - machine learning - big data diff --git a/infra/charts/feast/charts/transformation-service/README.md b/infra/charts/feast/charts/transformation-service/README.md index 23a4e2e491c..14de7d7b796 100644 --- a/infra/charts/feast/charts/transformation-service/README.md +++ b/infra/charts/feast/charts/transformation-service/README.md @@ -1,6 +1,6 @@ # transformation-service -![Version: 0.57.0](https://img.shields.io/badge/Version-0.57.0-informational?style=flat-square) ![AppVersion: v0.57.0](https://img.shields.io/badge/AppVersion-v0.57.0-informational?style=flat-square) +![Version: 0.58.0](https://img.shields.io/badge/Version-0.58.0-informational?style=flat-square) ![AppVersion: v0.58.0](https://img.shields.io/badge/AppVersion-v0.58.0-informational?style=flat-square) Transformation service: to compute on-demand features @@ -13,7 +13,7 @@ Transformation service: to compute on-demand features | envOverrides | object | `{}` | Extra environment variables to set | | image.pullPolicy | string | `"IfNotPresent"` | Image pull policy | | image.repository | string | `"quay.io/feastdev/feature-transformation-server"` | Docker image for Transformation Server repository | -| image.tag | string | `"0.57.0"` | Image tag | +| image.tag | string | `"0.58.0"` | Image tag | | nodeSelector | object | `{}` | Node labels for pod assignment | | podLabels | object | `{}` | Labels to be added to Feast Serving pods | | replicaCount | int | `1` | Number of pods that will be created | diff --git a/infra/charts/feast/charts/transformation-service/values.yaml b/infra/charts/feast/charts/transformation-service/values.yaml index fad48630231..8372fb268f1 100644 --- a/infra/charts/feast/charts/transformation-service/values.yaml +++ b/infra/charts/feast/charts/transformation-service/values.yaml @@ -5,7 +5,7 @@ image: # image.repository -- Docker image for Transformation Server repository repository: quay.io/feastdev/feature-transformation-server # image.tag -- Image tag - tag: 0.57.0 + tag: 0.58.0 # image.pullPolicy -- Image pull policy pullPolicy: IfNotPresent diff --git a/infra/charts/feast/requirements.yaml b/infra/charts/feast/requirements.yaml index e55c068f904..2c2163f10ae 100644 --- a/infra/charts/feast/requirements.yaml +++ b/infra/charts/feast/requirements.yaml @@ -1,12 +1,12 @@ dependencies: - name: feature-server alias: feature-server - version: 0.57.0 + version: 0.58.0 condition: feature-server.enabled repository: https://feast-helm-charts.storage.googleapis.com - name: transformation-service alias: transformation-service - version: 0.57.0 + version: 0.58.0 condition: transformation-service.enabled repository: https://feast-helm-charts.storage.googleapis.com - name: redis diff --git a/infra/feast-operator/Makefile b/infra/feast-operator/Makefile index 07b22fa0271..a750fad7d38 100644 --- a/infra/feast-operator/Makefile +++ b/infra/feast-operator/Makefile @@ -3,7 +3,7 @@ # To re-generate a bundle for another specific version without changing the standard setup, you can: # - use the 
VERSION as arg of the bundle target (e.g make bundle VERSION=0.0.2) # - use environment variables to overwrite this value (e.g export VERSION=0.0.2) -VERSION ?= 0.57.0 +VERSION ?= 0.58.0 # CHANNELS define the bundle channels used in the bundle. # Add a new line here if you would like to change its default config. (E.g CHANNELS = "candidate,fast,stable") diff --git a/infra/feast-operator/api/feastversion/version.go b/infra/feast-operator/api/feastversion/version.go index 1e3985859ee..f9fde825f06 100644 --- a/infra/feast-operator/api/feastversion/version.go +++ b/infra/feast-operator/api/feastversion/version.go @@ -17,4 +17,4 @@ limitations under the License. package feastversion // Feast release version. Keep on line #20, this is critical to release CI -const FeastVersion = "0.57.0" +const FeastVersion = "0.58.0" diff --git a/infra/feast-operator/bundle/manifests/feast-operator.clusterserviceversion.yaml b/infra/feast-operator/bundle/manifests/feast-operator.clusterserviceversion.yaml index 244f9565905..fdf5733c83a 100644 --- a/infra/feast-operator/bundle/manifests/feast-operator.clusterserviceversion.yaml +++ b/infra/feast-operator/bundle/manifests/feast-operator.clusterserviceversion.yaml @@ -5,7 +5,7 @@ metadata: alm-examples: |- [ { - "apiVersion": "feast.dev/v1alpha1", + "apiVersion": "feast.dev/v1", "kind": "FeatureStore", "metadata": { "name": "sample" @@ -15,7 +15,7 @@ metadata: } }, { - "apiVersion": "feast.dev/v1alpha1", + "apiVersion": "feast.dev/v1", "kind": "FeatureStore", "metadata": { "name": "sample-remote-servers" @@ -36,7 +36,7 @@ metadata: } }, { - "apiVersion": "feast.dev/v1alpha1", + "apiVersion": "feast.dev/v1", "kind": "FeatureStore", "metadata": { "name": "sample-ui" @@ -50,15 +50,20 @@ metadata: } ] capabilities: Basic Install - createdAt: "2025-11-13T20:26:23Z" + createdAt: "2025-12-16T17:51:22Z" operators.operatorframework.io/builder: operator-sdk-v1.38.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v4 - name: feast-operator.v0.57.0 + name: feast-operator.v0.58.0 namespace: placeholder spec: apiservicedefinitions: {} customresourcedefinitions: owned: + - description: FeatureStore is the Schema for the featurestores API + displayName: Feature Store + kind: FeatureStore + name: featurestores.feast.dev + version: v1 - description: FeatureStore is the Schema for the featurestores API displayName: Feature Store kind: FeatureStore @@ -225,10 +230,10 @@ spec: - /manager env: - name: RELATED_IMAGE_FEATURE_SERVER - value: quay.io/feastdev/feature-server:0.57.0 + value: quay.io/feastdev/feature-server:0.58.0 - name: RELATED_IMAGE_CRON_JOB value: quay.io/openshift/origin-cli:4.17 - image: quay.io/feastdev/feast-operator:0.57.0 + image: quay.io/feastdev/feast-operator:0.58.0 livenessProbe: httpGet: path: /healthz @@ -318,8 +323,8 @@ spec: name: Feast Community url: https://lf-aidata.atlassian.net/wiki/spaces/FEAST/ relatedImages: - - image: quay.io/feastdev/feature-server:0.57.0 + - image: quay.io/feastdev/feature-server:0.58.0 name: feature-server - image: quay.io/openshift/origin-cli:4.17 name: cron-job - version: 0.57.0 + version: 0.58.0 diff --git a/infra/feast-operator/bundle/manifests/feast.dev_featurestores.yaml b/infra/feast-operator/bundle/manifests/feast.dev_featurestores.yaml index 3a99b2e7a70..f85530b85b2 100644 --- a/infra/feast-operator/bundle/manifests/feast.dev_featurestores.yaml +++ b/infra/feast-operator/bundle/manifests/feast.dev_featurestores.yaml @@ -23,6 +23,8205 @@ spec: - jsonPath: .metadata.creationTimestamp name: Age type: date + 
name: v1 + schema: + openAPIV3Schema: + description: FeatureStore is the Schema for the featurestores API + properties: + apiVersion: + description: APIVersion defines the versioned schema of this representation + of an object. + type: string + kind: + description: Kind is a string value representing the REST resource this + object represents. + type: string + metadata: + type: object + spec: + description: FeatureStoreSpec defines the desired state of FeatureStore + properties: + authz: + description: AuthzConfig defines the authorization settings for the + deployed Feast services. + properties: + kubernetes: + description: |- + KubernetesAuthz provides a way to define the authorization settings using Kubernetes RBAC resources. + https://kubernetes. + properties: + roles: + description: The Kubernetes RBAC roles to be deployed in the + same namespace of the FeatureStore. + items: + type: string + type: array + type: object + oidc: + description: |- + OidcAuthz defines the authorization settings for deployments using an Open ID Connect identity provider. + https://auth0. + properties: + secretRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - secretRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required between kubernetes or oidc. + rule: '[has(self.kubernetes), has(self.oidc)].exists_one(c, c)' + cronJob: + description: FeastCronJob defines a CronJob to execute against a Feature + Store deployment. + properties: + annotations: + additionalProperties: + type: string + description: Annotations to be added to the CronJob metadata. + type: object + concurrencyPolicy: + description: Specifies how to treat concurrent executions of a + Job. + type: string + containerConfigs: + description: CronJobContainerConfigs k8s container settings for + the CronJob + properties: + commands: + description: Array of commands to be executed (in order) against + a Feature Store deployment. + items: + type: string + type: array + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' 
+ properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when to + pull a container image + type: string + nodeSelector: + additionalProperties: + type: string + type: object + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount of + compute resources required. + type: object + type: object + type: object + failedJobsHistoryLimit: + description: The number of failed finished jobs to retain. Value + must be non-negative integer. + format: int32 + type: integer + jobSpec: + description: Specification of the desired behavior of a job. + properties: + activeDeadlineSeconds: + description: |- + Specifies the duration in seconds relative to the startTime that the job + may be continuously active before the system tr + format: int64 + type: integer + backoffLimit: + description: Specifies the number of retries before marking + this job failed. + format: int32 + type: integer + backoffLimitPerIndex: + description: |- + Specifies the limit for the number of retries within an + index before marking this index as failed. + format: int32 + type: integer + completionMode: + description: |- + completionMode specifies how Pod completions are tracked. It can be + `NonIndexed` (default) or `Indexed`. + type: string + completions: + description: |- + Specifies the desired number of successfully finished pods the + job should be run with. + format: int32 + type: integer + maxFailedIndexes: + description: |- + Specifies the maximal number of failed indexes before marking the Job as + failed, when backoffLimitPerIndex is set. + format: int32 + type: integer + parallelism: + description: |- + Specifies the maximum desired number of pods the job should + run at any given time. + format: int32 + type: integer + podFailurePolicy: + description: Specifies the policy of handling failed pods. + properties: + rules: + description: A list of pod failure policy rules. The rules + are evaluated in order. + items: + description: PodFailurePolicyRule describes how a pod + failure is handled when the requirements are met. + properties: + action: + description: Specifies the action taken on a pod + failure when the requirements are satisfied. + type: string + onExitCodes: + description: Represents the requirement on the container + exit codes. + properties: + containerName: + description: |- + Restricts the check for exit codes to the container with the + specified name. + type: string + operator: + description: |- + Represents the relationship between the container exit code(s) and the + specified values. + type: string + values: + description: Specifies the set of values. + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + - values + type: object + onPodConditions: + description: |- + Represents the requirement on the pod conditions. The requirement is represented + as a list of pod condition patterns. 
+ items: + description: |- + PodFailurePolicyOnPodConditionsPattern describes a pattern for matching + an actual pod condition type. + properties: + status: + description: Specifies the required Pod condition + status. + type: string + type: + description: Specifies the required Pod condition + type. + type: string + required: + - status + - type + type: object + type: array + x-kubernetes-list-type: atomic + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + required: + - rules + type: object + podReplacementPolicy: + description: podReplacementPolicy specifies when to create + replacement Pods. + type: string + podTemplateAnnotations: + additionalProperties: + type: string + description: |- + PodTemplateAnnotations are annotations to be applied to the CronJob's PodTemplate + metadata. + type: object + suspend: + description: suspend specifies whether the Job controller + should create Pods or not. + type: boolean + ttlSecondsAfterFinished: + description: |- + ttlSecondsAfterFinished limits the lifetime of a Job that has finished + execution (either Complete or Failed). + format: int32 + type: integer + type: object + schedule: + description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + type: string + startingDeadlineSeconds: + description: |- + Optional deadline in seconds for starting the job if it misses scheduled + time for any reason. + format: int64 + type: integer + successfulJobsHistoryLimit: + description: The number of successful finished jobs to retain. + Value must be non-negative integer. + format: int32 + type: integer + suspend: + description: |- + This flag tells the controller to suspend subsequent executions, it does + not apply to already started executions. + type: boolean + timeZone: + description: The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. + type: string + type: object + feastProject: + description: FeastProject is the Feast project id. + pattern: ^[A-Za-z0-9][A-Za-z0-9_-]*$ + type: string + feastProjectDir: + description: FeastProjectDir defines how to create the feast project + directory. + properties: + git: + description: GitCloneOptions describes how a clone should be performed. + properties: + configs: + additionalProperties: + type: string + description: |- + Configs passed to git via `-c` + e.g. http.sslVerify: 'false' + OR 'url."https://api:\${TOKEN}@github.com/". + type: object + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + featureRepoPath: + description: FeatureRepoPath is the relative path to the feature + repo subdirectory. Default is 'feature_repo'. + type: string + ref: + description: Reference to a branch / tag / commit + type: string + url: + description: The repository URL to clone from. + type: string + required: + - url + type: object + x-kubernetes-validations: + - message: RepoPath must be a file name only, with no slashes. + rule: 'has(self.featureRepoPath) ? 
!self.featureRepoPath.startsWith(''/'') + : true' + init: + description: FeastInitOptions defines how to run a `feast init`. + properties: + minimal: + type: boolean + template: + description: Template for the created project + enum: + - local + - gcp + - aws + - snowflake + - spark + - postgres + - hbase + - cassandra + - hazelcast + - ikv + - couchbase + - clickhouse + type: string + type: object + type: object + x-kubernetes-validations: + - message: One selection required between init or git. + rule: '[has(self.git), has(self.init)].exists_one(c, c)' + services: + description: FeatureStoreServices defines the desired feast services. + An ephemeral onlineStore feature server is deployed by default. + properties: + deploymentStrategy: + description: DeploymentStrategy describes how to replace existing + pods with new ones. + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: The maximum number of pods that can be unavailable + during the update. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or "RollingUpdate". + Default is RollingUpdate. + type: string + type: object + disableInitContainers: + description: Disable the 'feast repo initialization' initContainer + type: boolean + offlineStore: + description: OfflineStore configures the offline store service + properties: + persistence: + description: OfflineStorePersistence configures the persistence + settings for the offline store service + properties: + file: + description: OfflineStoreFilePersistence configures the + file-based persistence for the offline store service + properties: + pvc: + description: PvcConfig defines the settings for a + persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent volume + access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which this + persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. 
+ Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref and + create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and must + not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: + enum: + - file + - dask + - duckdb + type: string + type: object + store: + description: OfflineStoreDBStorePersistence configures + the DB store persistence for the offline store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the secret + key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you want + to use. + enum: + - snowflake.offline + - bigquery + - redshift + - spark + - postgres + - trino + - athena + - mssql + - couchbase.offline + - clickhouse + - ray + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, c)' + server: + description: Creates a remote offline server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + metrics: + description: Metrics exposes Prometheus-compatible metrics + for the Feast server when enabled. + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, where + TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key + names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. 
+ type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + onlineStore: + description: OnlineStore configures the online store service + properties: + persistence: + description: OnlineStorePersistence configures the persistence + settings for the online store service + properties: + file: + description: OnlineStoreFilePersistence configures the + file-based persistence for the online store service + properties: + path: + type: string + pvc: + description: PvcConfig defines the settings for a + persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent volume + access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which this + persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref and + create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and must + not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: object + x-kubernetes-validations: + - message: Ephemeral stores must have absolute paths. + rule: '(!has(self.pvc) && has(self.path)) ? self.path.startsWith(''/'') + : true' + - message: PVC path must be a file name only, with no + slashes. + rule: '(has(self.pvc) && has(self.path)) ? !self.path.startsWith(''/'') + : true' + - message: Online store does not support S3 or GS buckets. + rule: 'has(self.path) ? 
!(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + store: + description: OnlineStoreDBStorePersistence configures + the DB store persistence for the online store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the secret + key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you want + to use. + enum: + - snowflake.online + - redis + - ikv + - datastore + - dynamodb + - bigtable + - postgres + - cassandra + - mysql + - hazelcast + - singlestore + - hbase + - elasticsearch + - qdrant + - couchbase.online + - milvus + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, c)' + server: + description: Creates a feature server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + metrics: + description: Metrics exposes Prometheus-compatible metrics + for the Feast server when enabled. + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, where + TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key + names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + registry: + description: Registry configures the registry service. One selection + is required. Local is the default setting. 
+ properties: + local: + description: LocalRegistryConfig configures the registry service + properties: + persistence: + description: RegistryPersistence configures the persistence + settings for the registry service + properties: + file: + description: RegistryFilePersistence configures the + file-based persistence for the registry service + properties: + cache_mode: + description: |- + CacheMode defines the registry cache update strategy. + Allowed values are "sync" and "thread". + enum: + - none + - sync + - thread + type: string + cache_ttl_seconds: + description: CacheTTLSeconds defines the TTL (in + seconds) for the registry cache. + format: int32 + minimum: 0 + type: integer + path: + type: string + pvc: + description: PvcConfig defines the settings for + a persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which + this persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref + and create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and + must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + s3_additional_kwargs: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Registry files must use absolute paths + or be S3 ('s3://') or GS ('gs://') object store + URIs. + rule: '(!has(self.pvc) && has(self.path)) ? (self.path.startsWith(''/'') + || self.path.startsWith(''s3://'') || self.path.startsWith(''gs://'')) + : true' + - message: PVC path must be a file name only, with + no slashes. + rule: '(has(self.pvc) && has(self.path)) ? !self.path.startsWith(''/'') + : true' + - message: PVC persistence does not support S3 or + GS object store URIs. 
+ rule: '(has(self.pvc) && has(self.path)) ? !(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + - message: Additional S3 settings are available only + for S3 object store URIs. + rule: '(has(self.s3_additional_kwargs) && has(self.path)) + ? self.path.startsWith(''s3://'') : true' + store: + description: RegistryDBStorePersistence configures + the DB store persistence for the registry service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the + secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you + want to use. + enum: + - sql + - snowflake.registry + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a registry server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + grpc: + description: Enable gRPC registry server. Defaults + to true if unset. + type: boolean + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + metrics: + description: Metrics exposes Prometheus-compatible + metrics for the Feast server when enabled. + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + restAPI: + description: Enable REST API registry server. + type: boolean + tls: + description: TlsConfigs configures server TLS for + a feast service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, + where TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + x-kubernetes-validations: + - message: At least one of restAPI or grpc must be true + rule: self.restAPI == true || self.grpc == true || !has(self.grpc) + type: object + remote: + description: RemoteRegistryConfig points to a remote feast + registry server. 
+ properties: + feastRef: + description: Reference to an existing `FeatureStore` CR + in the same k8s cluster. + properties: + name: + description: Name of the FeatureStore + type: string + namespace: + description: Namespace of the FeatureStore + type: string + required: + - name + type: object + hostname: + description: Host address of the remote registry service + - :, e.g. `registry..svc.cluster.local:80` + type: string + tls: + description: TlsRemoteRegistryConfigs configures client + TLS for a remote feast registry. + properties: + certName: + description: defines the configmap key name for the + client TLS cert. + type: string + configMapRef: + description: references the local k8s configmap where + the TLS cert resides + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - certName + - configMapRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required. + rule: '[has(self.hostname), has(self.feastRef)].exists_one(c, + c)' + type: object + x-kubernetes-validations: + - message: One selection required. + rule: '[has(self.local), has(self.remote)].exists_one(c, c)' + securityContext: + description: PodSecurityContext holds pod-level security attributes + and common container settings. + properties: + appArmorProfile: + description: appArmorProfile is the AppArmor options to use + by the containers in this pod. + properties: + localhostProfile: + description: localhostProfile indicates a profile loaded + on the node that should be used. + type: string + type: + description: type indicates which kind of AppArmor profile + will be applied. + type: string + required: + - type + type: object + fsGroup: + description: A special supplemental group that applies to + all containers in a pod. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as a non-root + user. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all containers. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os. + properties: + localhostProfile: + description: localhostProfile indicates a profile defined + in a file on the node should be used. + type: string + type: + description: type indicates which kind of seccomp profile + will be applied. 
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsG + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + sysctls: + description: Sysctls hold a list of namespaced sysctls used + for the pod. + items: + description: Sysctl defines a kernel parameter to be set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: The Windows specific settings applied to all + containers. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of the + GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container should + be run as a 'Host Process' container. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. + type: string + type: object + type: object + ui: + description: Creates a UI server container + properties: + env: + items: + description: EnvVar represents an environment variable present + in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's value. + Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap or + its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required for volumes, + optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the pod's + namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret or its + key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of a set + of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to each + key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must be + defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when to + pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + metrics: + description: Metrics exposes Prometheus-compatible metrics + for the Feast server when enabled. + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + resources: + description: ResourceRequirements describes the compute resource + requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount of + compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. useful + in an openshift cluster, for example, where TLS is configured + by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key names + for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where the + TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes that + should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from which + the container's volume should be mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + volumes: + description: Volumes specifies the volumes to mount in the FeatureStore + deployment. + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. 
+ properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to th + properties: + fsType: + description: fsType is the filesystem type of the volume + that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes. + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes. + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk mount + on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk in + the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in the + blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage accoun' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the host + that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s. + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile is + the path to key ring for User, default is /etc/ceph/user.' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is reference + to the authentication secret for User, default is + empty.' + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s. + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits used + to set permissions on created files by default.' + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used + to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta fea + properties: + driver: + description: driver is the name of the CSI driver that + handles this volume. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", "ntfs". + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to c + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). 
+ type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about the + pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents information + to create the file containing the pod field + properties: + fieldRef: + description: 'Required: Selects a field of the + pod: only annotations, labels, name, namespace + and uid are supported.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select in + the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must not + be absolute or contain the ''..'' path.' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format of + the exposed resources, defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes. + properties: + medium: + description: medium represents what type of storage + medium should back this directory. + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: sizeLimit is the total amount of local + storage required for this EmptyDir volume. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: ephemeral represents a volume that is handled + by a cluster storage driver. + properties: + volumeClaimTemplate: + description: Will be used to create a stand-alone PVC + to provision the volume. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s. + properties: + apiGroup: + description: APIGroup is the group for the + resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. + properties: + apiGroup: + description: APIGroup is the group for the + resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking. + type: string + required: + - kind + - name + type: object + resources: + description: resources represents the minimum + resources the volume should have. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The requirements + are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes. + type: string + volumeAttributesClassName: + description: volumeAttributesClassName may be + used to set the VolumeAttributesClass used + by this claim. 
+ type: string + volumeMode: + description: volumeMode defines what type of + volume is required by the claim. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource that + is attached to a kubelet's host machine and then exposed + to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target worldwide + names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: "wwids Optional: FC volume world wide identifiers + (wwids)\nEither wwids or combination of targetWWNs + and lun must be set, " + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to use + for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugi + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as depreca + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the po + properties: + fsType: + description: fsType is filesystem type of the volume + that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes. 
+ type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes. + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the specified + revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s. + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: fsType is the filesystem type of the volume + that you want to mount. + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal List. + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. 
+ type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes. + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes. + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes. + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + properties: + claimName: + description: claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + pdID: + description: pdID is the ID that identifies Photon Controller + persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources secrets, + configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used to set + permissions on created files by default. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected along + with other supported volume types + properties: + clusterTrustBundle: + description: ClusterTrustBundle allows a pod to + access the `.spec. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label key + that the selector applies to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} + pairs. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. + type: string + required: + - path + type: object + configMap: + description: configMap information about the configMap + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode + bits used to set permissions on this + file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional specify whether the + ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about the + downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects a field + of the pod: only annotations, labels, + name, namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file to + be created. Must not be absolute or + contain the ''..'' path.' 
+ type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the secret + data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a path + within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode + bits used to set permissions on this + file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended audience + of the token. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: quobyte represents a Quobyte mount on the host + that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple ent + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references an already + created Quobyte volume by name. 
+ type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + fsType: + description: fsType is the filesystem type of the volume + that you want to mount. + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s. + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s. + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + gateway: + description: gateway is the host address of the ScaleIO + API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the ScaleIO + Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL communication + with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage + for a volume should be ThickProvisioned or ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage Pool + associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. 
+ type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes. + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits used + to set permissions on created files by default.' + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a path within a + volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits used + to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the Secret + or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes. + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume attached + and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope of + the volume within StorageOS. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume attached + and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy Based + Management (SPBM) profile ID associated with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. 
+ type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + required: + - feastProject + type: object + status: + description: FeatureStoreStatus defines the observed state of FeatureStore + properties: + applied: + description: Shows the currently applied feast configuration, including + any pertinent defaults + properties: + authz: + description: AuthzConfig defines the authorization settings for + the deployed Feast services. + properties: + kubernetes: + description: |- + KubernetesAuthz provides a way to define the authorization settings using Kubernetes RBAC resources. + https://kubernetes. + properties: + roles: + description: The Kubernetes RBAC roles to be deployed + in the same namespace of the FeatureStore. + items: + type: string + type: array + type: object + oidc: + description: |- + OidcAuthz defines the authorization settings for deployments using an Open ID Connect identity provider. + https://auth0. + properties: + secretRef: + description: |- + LocalObjectReference contains enough information to let you locate the + referenced object inside the same namespace. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - secretRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required between kubernetes or oidc. + rule: '[has(self.kubernetes), has(self.oidc)].exists_one(c, + c)' + cronJob: + description: FeastCronJob defines a CronJob to execute against + a Feature Store deployment. + properties: + annotations: + additionalProperties: + type: string + description: Annotations to be added to the CronJob metadata. + type: object + concurrencyPolicy: + description: Specifies how to treat concurrent executions + of a Job. + type: string + containerConfigs: + description: CronJobContainerConfigs k8s container settings + for the CronJob + properties: + commands: + description: Array of commands to be executed (in order) + against a Feature Store deployment. + items: + type: string + type: array + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' 
+ properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + nodeSelector: + additionalProperties: + type: string + type: object + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + type: object + failedJobsHistoryLimit: + description: The number of failed finished jobs to retain. + Value must be non-negative integer. + format: int32 + type: integer + jobSpec: + description: Specification of the desired behavior of a job. + properties: + activeDeadlineSeconds: + description: |- + Specifies the duration in seconds relative to the startTime that the job + may be continuously active before the system tr + format: int64 + type: integer + backoffLimit: + description: Specifies the number of retries before marking + this job failed. + format: int32 + type: integer + backoffLimitPerIndex: + description: |- + Specifies the limit for the number of retries within an + index before marking this index as failed. + format: int32 + type: integer + completionMode: + description: |- + completionMode specifies how Pod completions are tracked. It can be + `NonIndexed` (default) or `Indexed`. + type: string + completions: + description: |- + Specifies the desired number of successfully finished pods the + job should be run with. + format: int32 + type: integer + maxFailedIndexes: + description: |- + Specifies the maximal number of failed indexes before marking the Job as + failed, when backoffLimitPerIndex is set. + format: int32 + type: integer + parallelism: + description: |- + Specifies the maximum desired number of pods the job should + run at any given time. + format: int32 + type: integer + podFailurePolicy: + description: Specifies the policy of handling failed pods. + properties: + rules: + description: A list of pod failure policy rules. The + rules are evaluated in order. + items: + description: PodFailurePolicyRule describes how + a pod failure is handled when the requirements + are met. + properties: + action: + description: Specifies the action taken on a + pod failure when the requirements are satisfied. + type: string + onExitCodes: + description: Represents the requirement on the + container exit codes. + properties: + containerName: + description: |- + Restricts the check for exit codes to the container with the + specified name. + type: string + operator: + description: |- + Represents the relationship between the container exit code(s) and the + specified values. + type: string + values: + description: Specifies the set of values. + items: + format: int32 + type: integer + type: array + x-kubernetes-list-type: set + required: + - operator + - values + type: object + onPodConditions: + description: |- + Represents the requirement on the pod conditions. The requirement is represented + as a list of pod condition patterns. 
+ items: + description: |- + PodFailurePolicyOnPodConditionsPattern describes a pattern for matching + an actual pod condition type. + properties: + status: + description: Specifies the required Pod + condition status. + type: string + type: + description: Specifies the required Pod + condition type. + type: string + required: + - status + - type + type: object + type: array + x-kubernetes-list-type: atomic + required: + - action + type: object + type: array + x-kubernetes-list-type: atomic + required: + - rules + type: object + podReplacementPolicy: + description: podReplacementPolicy specifies when to create + replacement Pods. + type: string + podTemplateAnnotations: + additionalProperties: + type: string + description: |- + PodTemplateAnnotations are annotations to be applied to the CronJob's PodTemplate + metadata. + type: object + suspend: + description: suspend specifies whether the Job controller + should create Pods or not. + type: boolean + ttlSecondsAfterFinished: + description: |- + ttlSecondsAfterFinished limits the lifetime of a Job that has finished + execution (either Complete or Failed). + format: int32 + type: integer + type: object + schedule: + description: The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + type: string + startingDeadlineSeconds: + description: |- + Optional deadline in seconds for starting the job if it misses scheduled + time for any reason. + format: int64 + type: integer + successfulJobsHistoryLimit: + description: The number of successful finished jobs to retain. + Value must be non-negative integer. + format: int32 + type: integer + suspend: + description: |- + This flag tells the controller to suspend subsequent executions, it does + not apply to already started executions. + type: boolean + timeZone: + description: The time zone name for the given schedule, see + https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. + type: string + type: object + feastProject: + description: FeastProject is the Feast project id. + pattern: ^[A-Za-z0-9][A-Za-z0-9_-]*$ + type: string + feastProjectDir: + description: FeastProjectDir defines how to create the feast project + directory. + properties: + git: + description: GitCloneOptions describes how a clone should + be performed. + properties: + configs: + additionalProperties: + type: string + description: |- + Configs passed to git via `-c` + e.g. http.sslVerify: 'false' + OR 'url."https://api:\${TOKEN}@github.com/". + type: object + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
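For orientation while reading this generated schema: the `cronJob` block mirrored above under `status.applied` configures scheduled Feast commands run against the deployment. Below is a minimal sketch of such a manifest, assuming the `feast.dev/v1` group/version and assuming `spec` accepts the same `cronJob` shape shown here under `status.applied`; the schedule, policy, and command values are illustrative only, not taken from this file.

```yaml
# Sketch only: apiVersion/group assumed; spec.cronJob assumed to match the
# status.applied.cronJob schema above; all values are illustrative.
apiVersion: feast.dev/v1
kind: FeatureStore
metadata:
  name: sample
spec:
  feastProject: my_project        # required; must match ^[A-Za-z0-9][A-Za-z0-9_-]*$
  cronJob:
    schedule: "0 2 * * *"         # Cron format (see the schedule field above)
    concurrencyPolicy: Forbid     # plain string in this schema; illustrative value
    containerConfigs:
      commands:                   # executed in order against the Feature Store deployment
        - feast apply
```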
+ type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. + properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + featureRepoPath: + description: FeatureRepoPath is the relative path to the + feature repo subdirectory. Default is 'feature_repo'. + type: string + ref: + description: Reference to a branch / tag / commit + type: string + url: + description: The repository URL to clone from. + type: string + required: + - url + type: object + x-kubernetes-validations: + - message: RepoPath must be a file name only, with no slashes. + rule: 'has(self.featureRepoPath) ? 
!self.featureRepoPath.startsWith(''/'') + : true' + init: + description: FeastInitOptions defines how to run a `feast + init`. + properties: + minimal: + type: boolean + template: + description: Template for the created project + enum: + - local + - gcp + - aws + - snowflake + - spark + - postgres + - hbase + - cassandra + - hazelcast + - ikv + - couchbase + - clickhouse + type: string + type: object + type: object + x-kubernetes-validations: + - message: One selection required between init or git. + rule: '[has(self.git), has(self.init)].exists_one(c, c)' + services: + description: FeatureStoreServices defines the desired feast services. + An ephemeral onlineStore feature server is deployed by default. + properties: + deploymentStrategy: + description: DeploymentStrategy describes how to replace existing + pods with new ones. + properties: + rollingUpdate: + description: |- + Rolling update config params. Present only if DeploymentStrategyType = + RollingUpdate. + properties: + maxSurge: + anyOf: + - type: integer + - type: string + description: |- + The maximum number of pods that can be scheduled above the desired number of + pods. + x-kubernetes-int-or-string: true + maxUnavailable: + anyOf: + - type: integer + - type: string + description: The maximum number of pods that can be + unavailable during the update. + x-kubernetes-int-or-string: true + type: object + type: + description: Type of deployment. Can be "Recreate" or + "RollingUpdate". Default is RollingUpdate. + type: string + type: object + disableInitContainers: + description: Disable the 'feast repo initialization' initContainer + type: boolean + offlineStore: + description: OfflineStore configures the offline store service + properties: + persistence: + description: OfflineStorePersistence configures the persistence + settings for the offline store service + properties: + file: + description: OfflineStoreFilePersistence configures + the file-based persistence for the offline store + service + properties: + pvc: + description: PvcConfig defines the settings for + a persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which + this persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. 
+ Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref + and create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and + must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: + enum: + - file + - dask + - duckdb + type: string + type: object + store: + description: OfflineStoreDBStorePersistence configures + the DB store persistence for the offline store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the + secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you + want to use. + enum: + - snowflake.offline + - bigquery + - redshift + - spark + - postgres + - trino + - athena + - mssql + - couchbase.offline + - clickhouse + - ray + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a remote offline server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
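The `offlineStore.persistence` schema above allows exactly one of `file` or `store`, enforced by the CEL rule. Below is a sketch of the DB-backed variant, assuming `spec.services` accepts the same shape mirrored here under `status.applied.services`; the Secret name is hypothetical.

```yaml
# Fragment sketch: spec.services assumed to mirror the applied schema above;
# "feast-postgres-secret" is a hypothetical Secret whose "postgres" key holds
# the offline store parameters as they would appear in feature_store.yaml.
spec:
  feastProject: my_project
  services:
    offlineStore:
      persistence:
        store:
          type: postgres          # one of the enum values listed above
          secretRef:
            name: feast-postgres-secret
          # secretKeyName defaults to the selected store type when omitted
```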
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + metrics: + description: Metrics exposes Prometheus-compatible + metrics for the Feast server when enabled. + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for + a feast service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, + where TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. 
+ type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + onlineStore: + description: OnlineStore configures the online store service + properties: + persistence: + description: OnlineStorePersistence configures the persistence + settings for the online store service + properties: + file: + description: OnlineStoreFilePersistence configures + the file-based persistence for the online store + service + properties: + path: + type: string + pvc: + description: PvcConfig defines the settings for + a persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. Defaults to ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the storage + resource requirements for a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + storageClassName: + description: StorageClassName is the name + of an existing StorageClass to which + this persistent volume belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between ref + and create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' and + must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + type: object + x-kubernetes-validations: + - message: Ephemeral stores must have absolute paths. + rule: '(!has(self.pvc) && has(self.path)) ? self.path.startsWith(''/'') + : true' + - message: PVC path must be a file name only, with + no slashes. + rule: '(has(self.pvc) && has(self.path)) ? !self.path.startsWith(''/'') + : true' + - message: Online store does not support S3 or GS + buckets. + rule: 'has(self.path) ? 
!(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + store: + description: OnlineStoreDBStorePersistence configures + the DB store persistence for the online store service + properties: + secretKeyName: + description: By default, the selected store "type" + is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should be placed + as-is from the "feature_store.yaml" under the + secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type you + want to use. + enum: + - snowflake.online + - redis + - ikv + - datastore + - dynamodb + - bigtable + - postgres + - cassandra + - mysql + - hazelcast + - singlestore + - hbase + - elasticsearch + - qdrant + - couchbase.online + - milvus + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or store. + rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a feature server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: + supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, + defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults + to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to + select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in + the pod's namespace + properties: + key: + description: The key of the secret to + select from. Must be a valid secret + key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + metrics: + description: Metrics exposes Prometheus-compatible + metrics for the Feast server when enabled. + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for + a feast service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, + where TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of + a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should be + mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + type: object + registry: + description: Registry configures the registry service. One + selection is required. Local is the default setting. 
+ properties: + local: + description: LocalRegistryConfig configures the registry + service + properties: + persistence: + description: RegistryPersistence configures the persistence + settings for the registry service + properties: + file: + description: RegistryFilePersistence configures + the file-based persistence for the registry + service + properties: + cache_mode: + description: |- + CacheMode defines the registry cache update strategy. + Allowed values are "sync" and "thread". + enum: + - none + - sync + - thread + type: string + cache_ttl_seconds: + description: CacheTTLSeconds defines the TTL + (in seconds) for the registry cache. + format: int32 + minimum: 0 + type: integer + path: + type: string + pvc: + description: PvcConfig defines the settings + for a persistent file store based on PVCs. + properties: + create: + description: Settings for creating a new + PVC + properties: + accessModes: + description: AccessModes k8s persistent + volume access modes. Defaults to + ["ReadWriteOnce"]. + items: + type: string + type: array + resources: + description: Resources describes the + storage resource requirements for + a volume. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes + the minimum amount of compute + resources required. + type: object + type: object + storageClassName: + description: StorageClassName is the + name of an existing StorageClass + to which this persistent volume + belongs. + type: string + type: object + x-kubernetes-validations: + - message: PvcCreate is immutable + rule: self == oldSelf + mountPath: + description: |- + MountPath within the container at which the volume should be mounted. + Must start by "/" and cannot contain ':'. + type: string + ref: + description: Reference to an existing + field + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - mountPath + type: object + x-kubernetes-validations: + - message: One selection is required between + ref and create. + rule: '[has(self.ref), has(self.create)].exists_one(c, + c)' + - message: Mount path must start with '/' + and must not contain ':' + rule: self.mountPath.matches('^/[^:]*$') + s3_additional_kwargs: + additionalProperties: + type: string + type: object + type: object + x-kubernetes-validations: + - message: Registry files must use absolute paths + or be S3 ('s3://') or GS ('gs://') object + store URIs. + rule: '(!has(self.pvc) && has(self.path)) ? + (self.path.startsWith(''/'') || self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + - message: PVC path must be a file name only, + with no slashes. + rule: '(has(self.pvc) && has(self.path)) ? !self.path.startsWith(''/'') + : true' + - message: PVC persistence does not support S3 + or GS object store URIs. 
+ rule: '(has(self.pvc) && has(self.path)) ? !(self.path.startsWith(''s3://'') + || self.path.startsWith(''gs://'')) : true' + - message: Additional S3 settings are available + only for S3 object store URIs. + rule: '(has(self.s3_additional_kwargs) && has(self.path)) + ? self.path.startsWith(''s3://'') : true' + store: + description: RegistryDBStorePersistence configures + the DB store persistence for the registry service + properties: + secretKeyName: + description: By default, the selected store + "type" is used as the SecretKeyName + type: string + secretRef: + description: Data store parameters should + be placed as-is from the "feature_store.yaml" + under the secret key. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: + description: Type of the persistence type + you want to use. + enum: + - sql + - snowflake.registry + type: string + required: + - secretRef + - type + type: object + type: object + x-kubernetes-validations: + - message: One selection required between file or + store. + rule: '[has(self.file), has(self.store)].exists_one(c, + c)' + server: + description: Creates a registry server container + properties: + env: + items: + description: EnvVar represents an environment + variable present in a Container. + properties: + name: + description: Name of the environment variable. + Must be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment + variable's value. Cannot be used if value + is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the + ConfigMap or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the + pod: supports metadata.name, metadata.namespace, + `metadata.labels['''']`, `metadata.' + properties: + apiVersion: + description: Version of the schema + the FieldPath is written in terms + of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to + select in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required + for volumes, optional for env + vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret + in the pod's namespace + properties: + key: + description: The key of the secret + to select from. Must be a valid + secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the + Secret or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source + of a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend + to each key in the ConfigMap. Must be + a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + grpc: + description: Enable gRPC registry server. Defaults + to true if unset. + type: boolean + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for + if/when to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + metrics: + description: Metrics exposes Prometheus-compatible + metrics for the Feast server when enabled. + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + resources: + description: ResourceRequirements describes the + compute resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one + entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum + amount of compute resources required. + type: object + type: object + restAPI: + description: Enable REST API registry server. + type: boolean + tls: + description: TlsConfigs configures server TLS + for a feast service. + properties: + disable: + description: will disable TLS for the feast + service. useful in an openshift cluster, + for example, where TLS is configured by + default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret + key names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret + where the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` + is false.' + rule: '(!has(self.disable) || !self.disable) + ? has(self.secretRef) : true' + volumeMounts: + description: VolumeMounts defines the list of + volumes that should be mounted into the feast + container. + items: + description: VolumeMount describes a mounting + of a Volume within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of + a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume + from which the container's volume should + be mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + x-kubernetes-validations: + - message: At least one of restAPI or grpc must be + true + rule: self.restAPI == true || self.grpc == true + || !has(self.grpc) + type: object + remote: + description: RemoteRegistryConfig points to a remote feast + registry server. 
+ properties: + feastRef: + description: Reference to an existing `FeatureStore` + CR in the same k8s cluster. + properties: + name: + description: Name of the FeatureStore + type: string + namespace: + description: Namespace of the FeatureStore + type: string + required: + - name + type: object + hostname: + description: Host address of the remote registry service + - :, e.g. `registry..svc.cluster.local:80` + type: string + tls: + description: TlsRemoteRegistryConfigs configures client + TLS for a remote feast registry. + properties: + certName: + description: defines the configmap key name for + the client TLS cert. + type: string + configMapRef: + description: references the local k8s configmap + where the TLS cert resides + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - certName + - configMapRef + type: object + type: object + x-kubernetes-validations: + - message: One selection required. + rule: '[has(self.hostname), has(self.feastRef)].exists_one(c, + c)' + type: object + x-kubernetes-validations: + - message: One selection required. + rule: '[has(self.local), has(self.remote)].exists_one(c, + c)' + securityContext: + description: PodSecurityContext holds pod-level security attributes + and common container settings. + properties: + appArmorProfile: + description: appArmorProfile is the AppArmor options to + use by the containers in this pod. + properties: + localhostProfile: + description: localhostProfile indicates a profile + loaded on the node that should be used. + type: string + type: + description: type indicates which kind of AppArmor + profile will be applied. + type: string + required: + - type + type: object + fsGroup: + description: A special supplemental group that applies + to all containers in a pod. + format: int64 + type: integer + fsGroupChangePolicy: + description: |- + fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + before being exposed inside Pod. + type: string + runAsGroup: + description: |- + The GID to run the entrypoint of the container process. + Uses runtime default if unset. + format: int64 + type: integer + runAsNonRoot: + description: Indicates that the container must run as + a non-root user. + type: boolean + runAsUser: + description: |- + The UID to run the entrypoint of the container process. + Defaults to user specified in image metadata if unspecified. + format: int64 + type: integer + seLinuxOptions: + description: The SELinux context to be applied to all + containers. + properties: + level: + description: Level is SELinux level label that applies + to the container. + type: string + role: + description: Role is a SELinux role label that applies + to the container. + type: string + type: + description: Type is a SELinux type label that applies + to the container. + type: string + user: + description: User is a SELinux user label that applies + to the container. + type: string + type: object + seccompProfile: + description: |- + The seccomp options to use by the containers in this pod. + Note that this field cannot be set when spec.os. + properties: + localhostProfile: + description: localhostProfile indicates a profile + defined in a file on the node should be used. + type: string + type: + description: type indicates which kind of seccomp + profile will be applied. 
+ type: string + required: + - type + type: object + supplementalGroups: + description: |- + A list of groups applied to the first process run in each container, in addition + to the container's primary GID, the fsG + items: + format: int64 + type: integer + type: array + x-kubernetes-list-type: atomic + sysctls: + description: Sysctls hold a list of namespaced sysctls + used for the pod. + items: + description: Sysctl defines a kernel parameter to be + set + properties: + name: + description: Name of a property to set + type: string + value: + description: Value of a property to set + type: string + required: + - name + - value + type: object + type: array + x-kubernetes-list-type: atomic + windowsOptions: + description: The Windows specific settings applied to + all containers. + properties: + gmsaCredentialSpec: + description: |- + GMSACredentialSpec is where the GMSA admission webhook + (https://github. + type: string + gmsaCredentialSpecName: + description: GMSACredentialSpecName is the name of + the GMSA credential spec to use. + type: string + hostProcess: + description: HostProcess determines if a container + should be run as a 'Host Process' container. + type: boolean + runAsUserName: + description: The UserName in Windows to run the entrypoint + of the container process. + type: string + type: object + type: object + ui: + description: Creates a UI server container + properties: + env: + items: + description: EnvVar represents an environment variable + present in a Container. + properties: + name: + description: Name of the environment variable. Must + be a C_IDENTIFIER. + type: string + value: + description: |- + Variable references $(VAR_NAME) are expanded + using the previously defined environment variables in the container and + any + type: string + valueFrom: + description: Source for the environment variable's + value. Cannot be used if value is not empty. + properties: + configMapKeyRef: + description: Selects a key of a ConfigMap. + properties: + key: + description: The key to select. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + fieldRef: + description: 'Selects a field of the pod: supports + metadata.name, metadata.namespace, `metadata.labels['''']`, + `metadata.' + properties: + apiVersion: + description: Version of the schema the FieldPath + is written in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, limits. 
+ properties: + containerName: + description: 'Container name: required for + volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + secretKeyRef: + description: Selects a key of a secret in the + pod's namespace + properties: + key: + description: The key of the secret to select + from. Must be a valid secret key. + type: string + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret + or its key must be defined + type: boolean + required: + - key + type: object + x-kubernetes-map-type: atomic + type: object + required: + - name + type: object + type: array + envFrom: + items: + description: EnvFromSource represents the source of + a set of ConfigMaps + properties: + configMapRef: + description: The ConfigMap to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the ConfigMap must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + prefix: + description: An optional identifier to prepend to + each key in the ConfigMap. Must be a C_IDENTIFIER. + type: string + secretRef: + description: The Secret to select from + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: Specify whether the Secret must + be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + type: object + type: array + image: + type: string + imagePullPolicy: + description: PullPolicy describes a policy for if/when + to pull a container image + type: string + logLevel: + description: |- + LogLevel sets the logging level for the server + Allowed values: "debug", "info", "warning", "error", "critical". + enum: + - debug + - info + - warning + - error + - critical + type: string + metrics: + description: Metrics exposes Prometheus-compatible metrics + for the Feast server when enabled. + type: boolean + nodeSelector: + additionalProperties: + type: string + type: object + resources: + description: ResourceRequirements describes the compute + resource requirements. + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + items: + description: ResourceClaim references one entry + in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. 
+ type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the minimum amount + of compute resources required. + type: object + type: object + tls: + description: TlsConfigs configures server TLS for a feast + service. + properties: + disable: + description: will disable TLS for the feast service. + useful in an openshift cluster, for example, where + TLS is configured by default + type: boolean + secretKeyNames: + description: SecretKeyNames defines the secret key + names for the TLS key and cert. + properties: + tlsCrt: + description: defaults to "tls.crt" + type: string + tlsKey: + description: defaults to "tls.key" + type: string + type: object + secretRef: + description: references the local k8s secret where + the TLS key and cert reside + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + type: object + x-kubernetes-validations: + - message: '`secretRef` required if `disable` is false.' + rule: '(!has(self.disable) || !self.disable) ? has(self.secretRef) + : true' + volumeMounts: + description: VolumeMounts defines the list of volumes + that should be mounted into the feast container. + items: + description: VolumeMount describes a mounting of a Volume + within a container. + properties: + mountPath: + description: |- + Path within the container at which the volume should be mounted. Must + not contain ':'. + type: string + mountPropagation: + description: |- + mountPropagation determines how mounts are propagated from the host + to container and the other way around. + type: string + name: + description: This must match the Name of a Volume. + type: string + readOnly: + description: |- + Mounted read-only if true, read-write otherwise (false or unspecified). + Defaults to false. + type: boolean + recursiveReadOnly: + description: |- + RecursiveReadOnly specifies whether read-only mounts should be handled + recursively. + type: string + subPath: + description: |- + Path within the volume from which the container's volume should be mounted. + Defaults to "" (volume's root). + type: string + subPathExpr: + description: Expanded path within the volume from + which the container's volume should be mounted. + type: string + required: + - mountPath + - name + type: object + type: array + type: object + volumes: + description: Volumes specifies the volumes to mount in the + FeatureStore deployment. + items: + description: Volume represents a named volume in a pod that + may be accessed by any container in the pod. 
+ properties: + awsElasticBlockStore: + description: |- + awsElasticBlockStore represents an AWS Disk resource that is attached to a + kubelet's host machine and then exposed to th + properties: + fsType: + description: fsType is the filesystem type of the + volume that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + format: int32 + type: integer + readOnly: + description: |- + readOnly value true will force the readOnly setting in VolumeMounts. + More info: https://kubernetes. + type: boolean + volumeID: + description: |- + volumeID is unique ID of the persistent disk resource in AWS (Amazon EBS volume). + More info: https://kubernetes. + type: string + required: + - volumeID + type: object + azureDisk: + description: azureDisk represents an Azure Data Disk + mount on the host and bind mount to the pod. + properties: + cachingMode: + description: 'cachingMode is the Host Caching mode: + None, Read Only, Read Write.' + type: string + diskName: + description: diskName is the Name of the data disk + in the blob storage + type: string + diskURI: + description: diskURI is the URI of data disk in + the blob storage + type: string + fsType: + description: |- + fsType is Filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + kind: + description: 'kind expected values are Shared: multiple + blob disks per storage account Dedicated: single + blob disk per storage accoun' + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + required: + - diskName + - diskURI + type: object + azureFile: + description: azureFile represents an Azure File Service + mount on the host and bind mount to the pod. + properties: + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretName: + description: secretName is the name of secret that + contains Azure Storage Account Name and Key + type: string + shareName: + description: shareName is the azure share Name + type: string + required: + - secretName + - shareName + type: object + cephfs: + description: cephFS represents a Ceph FS mount on the + host that shares a pod's lifetime + properties: + monitors: + description: |- + monitors is Required: Monitors is a collection of Ceph monitors + More info: https://examples.k8s. + items: + type: string + type: array + x-kubernetes-list-type: atomic + path: + description: 'path is Optional: Used as the mounted + root, rather than the full Ceph tree, default + is /' + type: string + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretFile: + description: 'secretFile is Optional: SecretFile + is the path to key ring for User, default is /etc/ceph/user.' + type: string + secretRef: + description: 'secretRef is Optional: SecretRef is + reference to the authentication secret for User, + default is empty.' + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. 
+ type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is optional: User is the rados user name, default is admin + More info: https://examples.k8s. + type: string + required: + - monitors + type: object + cinder: + description: |- + cinder represents a cinder volume attached and mounted on kubelets host machine. + More info: https://examples.k8s. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is optional: points to a secret object containing parameters used to connect + to OpenStack. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + volumeID: + description: |- + volumeID used to identify the volume in cinder. + More info: https://examples.k8s.io/mysql-cinder-pd/README.md + type: string + required: + - volumeID + type: object + configMap: + description: configMap represents a configMap that should + populate this volume + properties: + defaultMode: + description: 'defaultMode is optional: mode bits + used to set permissions on created files by default.' + format: int32 + type: integer + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional specify whether the ConfigMap + or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + csi: + description: csi (Container Storage Interface) represents + ephemeral storage that is handled by certain external + CSI drivers (Beta fea + properties: + driver: + description: driver is the name of the CSI driver + that handles this volume. + type: string + fsType: + description: fsType to mount. Ex. "ext4", "xfs", + "ntfs". + type: string + nodePublishSecretRef: + description: |- + nodePublishSecretRef is a reference to the secret object containing + sensitive information to pass to the CSI driver to c + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + readOnly: + description: |- + readOnly specifies a read-only configuration for the volume. + Defaults to false (read/write). 
+ type: boolean + volumeAttributes: + additionalProperties: + type: string + description: |- + volumeAttributes stores driver-specific properties that are passed to the CSI + driver. + type: object + required: + - driver + type: object + downwardAPI: + description: downwardAPI represents downward API about + the pod that should populate this volume + properties: + defaultMode: + description: 'Optional: mode bits to use on created + files by default.' + format: int32 + type: integer + items: + description: Items is a list of downward API volume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing the + pod field + properties: + fieldRef: + description: 'Required: Selects a field of + the pod: only annotations, labels, name, + namespace and uid are supported.' + properties: + apiVersion: + description: Version of the schema the + FieldPath is written in terms of, defaults + to "v1". + type: string + fieldPath: + description: Path of the field to select + in the specified API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the relative + path name of the file to be created. Must + not be absolute or contain the ''..'' path.' + type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. + properties: + containerName: + description: 'Container name: required + for volumes, optional for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output format + of the exposed resources, defaults to + "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + emptyDir: + description: |- + emptyDir represents a temporary directory that shares a pod's lifetime. + More info: https://kubernetes. + properties: + medium: + description: medium represents what type of storage + medium should back this directory. + type: string + sizeLimit: + anyOf: + - type: integer + - type: string + description: sizeLimit is the total amount of local + storage required for this EmptyDir volume. + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + type: object + ephemeral: + description: ephemeral represents a volume that is handled + by a cluster storage driver. + properties: + volumeClaimTemplate: + description: Will be used to create a stand-alone + PVC to provision the volume. + properties: + metadata: + description: |- + May contain labels and annotations that will be copied into the PVC + when creating it. + type: object + spec: + description: The specification for the PersistentVolumeClaim. + properties: + accessModes: + description: |- + accessModes contains the desired access modes the volume should have. + More info: https://kubernetes. 
+ items: + type: string + type: array + x-kubernetes-list-type: atomic + dataSource: + description: |- + dataSource field can be used to specify either: + * An existing VolumeSnapshot object (snapshot.storage.k8s. + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + required: + - kind + - name + type: object + x-kubernetes-map-type: atomic + dataSourceRef: + description: |- + dataSourceRef specifies the object from which to populate the volume with data, if a non-empty + volume is desired. + properties: + apiGroup: + description: APIGroup is the group for + the resource being referenced. + type: string + kind: + description: Kind is the type of resource + being referenced + type: string + name: + description: Name is the name of resource + being referenced + type: string + namespace: + description: |- + Namespace is the namespace of resource being referenced + Note that when a namespace is specified, a gateway.networking. + type: string + required: + - kind + - name + type: object + resources: + description: resources represents the minimum + resources the volume should have. + properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes. + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: Requests describes the + minimum amount of compute resources + required. + type: object + type: object + selector: + description: selector is a label query over + volumes to consider for binding. + properties: + matchExpressions: + description: matchExpressions is a list + of label selector requirements. The + requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. + type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of + {key,value} pairs. + type: object + type: object + x-kubernetes-map-type: atomic + storageClassName: + description: |- + storageClassName is the name of the StorageClass required by the claim. + More info: https://kubernetes. + type: string + volumeAttributesClassName: + description: volumeAttributesClassName may + be used to set the VolumeAttributesClass + used by this claim. 
+ type: string + volumeMode: + description: volumeMode defines what type + of volume is required by the claim. + type: string + volumeName: + description: volumeName is the binding reference + to the PersistentVolume backing this claim. + type: string + type: object + required: + - spec + type: object + type: object + fc: + description: fc represents a Fibre Channel resource + that is attached to a kubelet's host machine and then + exposed to the pod. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + lun: + description: 'lun is Optional: FC target lun number' + format: int32 + type: integer + readOnly: + description: |- + readOnly is Optional: Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + targetWWNs: + description: 'targetWWNs is Optional: FC target + worldwide names (WWNs)' + items: + type: string + type: array + x-kubernetes-list-type: atomic + wwids: + description: "wwids Optional: FC volume world wide + identifiers (wwids)\nEither wwids or combination + of targetWWNs and lun must be set, " + items: + type: string + type: array + x-kubernetes-list-type: atomic + type: object + flexVolume: + description: |- + flexVolume represents a generic volume resource that is + provisioned/attached using an exec based plugin. + properties: + driver: + description: driver is the name of the driver to + use for this volume. + type: string + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + options: + additionalProperties: + type: string + description: 'options is Optional: this field holds + extra command options if any.' + type: object + readOnly: + description: |- + readOnly is Optional: defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef is Optional: secretRef is reference to the secret object containing + sensitive information to pass to the plugi + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + required: + - driver + type: object + flocker: + description: flocker represents a Flocker volume attached + to a kubelet's host machine. + properties: + datasetName: + description: |- + datasetName is Name of the dataset stored as metadata -> name on the dataset for Flocker + should be considered as depreca + type: string + datasetUUID: + description: datasetUUID is the UUID of the dataset. + This is unique identifier of a Flocker dataset + type: string + type: object + gcePersistentDisk: + description: |- + gcePersistentDisk represents a GCE Disk resource that is attached to a + kubelet's host machine and then exposed to the po + properties: + fsType: + description: fsType is filesystem type of the volume + that you want to mount. + type: string + partition: + description: |- + partition is the partition in the volume that you want to mount. + If omitted, the default is to mount by volume name. + format: int32 + type: integer + pdName: + description: |- + pdName is unique name of the PD resource in GCE. Used to identify the disk in GCE. + More info: https://kubernetes. 
+ type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://kubernetes. + type: boolean + required: + - pdName + type: object + gitRepo: + description: |- + gitRepo represents a git repository at a particular revision. + DEPRECATED: GitRepo is deprecated. + properties: + directory: + description: |- + directory is the target directory name. + Must not contain or start with '..'. If '. + type: string + repository: + description: repository is the URL + type: string + revision: + description: revision is the commit hash for the + specified revision. + type: string + required: + - repository + type: object + glusterfs: + description: |- + glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + endpoints: + description: |- + endpoints is the endpoint name that details Glusterfs topology. + More info: https://examples.k8s. + type: string + path: + description: |- + path is the Glusterfs volume path. + More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + type: string + readOnly: + description: |- + readOnly here will force the Glusterfs volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + required: + - endpoints + - path + type: object + hostPath: + description: |- + hostPath represents a pre-existing file or directory on the host + machine that is directly exposed to the container. + properties: + path: + description: |- + path of the directory on the host. + If the path is a symlink, it will follow the link to the real path. + type: string + type: + description: |- + type for HostPath Volume + Defaults to "" + More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + type: string + required: + - path + type: object + iscsi: + description: |- + iscsi represents an ISCSI Disk resource that is attached to a + kubelet's host machine and then exposed to the pod. + properties: + chapAuthDiscovery: + description: chapAuthDiscovery defines whether support + iSCSI Discovery CHAP authentication + type: boolean + chapAuthSession: + description: chapAuthSession defines whether support + iSCSI Session CHAP authentication + type: boolean + fsType: + description: fsType is the filesystem type of the + volume that you want to mount. + type: string + initiatorName: + description: initiatorName is the custom iSCSI Initiator + Name. + type: string + iqn: + description: iqn is the target iSCSI Qualified Name. + type: string + iscsiInterface: + description: |- + iscsiInterface is the interface Name that uses an iSCSI transport. + Defaults to 'default' (tcp). + type: string + lun: + description: lun represents iSCSI Target Lun number. + format: int32 + type: integer + portals: + description: portals is the iSCSI Target Portal + List. + items: + type: string + type: array + x-kubernetes-list-type: atomic + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + type: boolean + secretRef: + description: secretRef is the CHAP Secret for iSCSI + target and initiator authentication + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + targetPortal: + description: targetPortal is iSCSI Target Portal. 
+ type: string + required: + - iqn + - lun + - targetPortal + type: object + name: + description: |- + name of the volume. + Must be a DNS_LABEL and unique within the pod. + More info: https://kubernetes. + type: string + nfs: + description: |- + nfs represents an NFS mount on the host that shares a pod's lifetime + More info: https://kubernetes. + properties: + path: + description: |- + path that is exported by the NFS server. + More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + type: string + readOnly: + description: |- + readOnly here will force the NFS export to be mounted with read-only permissions. + Defaults to false. + type: boolean + server: + description: |- + server is the hostname or IP address of the NFS server. + More info: https://kubernetes. + type: string + required: + - path + - server + type: object + persistentVolumeClaim: + description: |- + persistentVolumeClaimVolumeSource represents a reference to a + PersistentVolumeClaim in the same namespace. + properties: + claimName: + description: claimName is the name of a PersistentVolumeClaim + in the same namespace as the pod using this volume. + type: string + readOnly: + description: |- + readOnly Will force the ReadOnly setting in VolumeMounts. + Default false. + type: boolean + required: + - claimName + type: object + photonPersistentDisk: + description: photonPersistentDisk represents a PhotonController + persistent disk attached and mounted on kubelets host + machine + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + pdID: + description: pdID is the ID that identifies Photon + Controller persistent disk + type: string + required: + - pdID + type: object + portworxVolume: + description: portworxVolume represents a portworx volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fSType represents the filesystem type to mount + Must be a filesystem type supported by the host operating system. + Ex. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + volumeID: + description: volumeID uniquely identifies a Portworx + volume + type: string + required: + - volumeID + type: object + projected: + description: projected items for all in one resources + secrets, configmaps, and downward API + properties: + defaultMode: + description: defaultMode are the mode bits used + to set permissions on created files by default. + format: int32 + type: integer + sources: + description: sources is the list of volume projections + items: + description: Projection that may be projected + along with other supported volume types + properties: + clusterTrustBundle: + description: ClusterTrustBundle allows a pod + to access the `.spec. + properties: + labelSelector: + description: |- + Select all ClusterTrustBundles that match this label selector. Only has + effect if signerName is set. + properties: + matchExpressions: + description: matchExpressions is a + list of label selector requirements. + The requirements are ANDed. + items: + description: |- + A label selector requirement is a selector that contains values, a key, and an operator that + relates the key and values. + properties: + key: + description: key is the label + key that the selector applies + to. 
+ type: string + operator: + description: |- + operator represents a key's relationship to a set of values. + Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: |- + values is an array of string values. If the operator is In or NotIn, + the values array must be non-empty. + items: + type: string + type: array + x-kubernetes-list-type: atomic + required: + - key + - operator + type: object + type: array + x-kubernetes-list-type: atomic + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map + of {key,value} pairs. + type: object + type: object + x-kubernetes-map-type: atomic + name: + description: |- + Select a single ClusterTrustBundle by object name. Mutually-exclusive + with signerName and labelSelector. + type: string + optional: + description: |- + If true, don't block pod startup if the referenced ClusterTrustBundle(s) + aren't available. + type: boolean + path: + description: Relative path from the volume + root to write the bundle. + type: string + signerName: + description: |- + Select all ClusterTrustBundles that match this signer name. + Mutually-exclusive with name. + type: string + required: + - path + type: object + configMap: + description: configMap information about the + configMap data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + ConfigMap will be projected into the volum + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional specify whether + the ConfigMap or its keys must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + downwardAPI: + description: downwardAPI information about + the downwardAPI data to project + properties: + items: + description: Items is a list of DownwardAPIVolume + file + items: + description: DownwardAPIVolumeFile represents + information to create the file containing + the pod field + properties: + fieldRef: + description: 'Required: Selects + a field of the pod: only annotations, + labels, name, namespace and uid + are supported.' + properties: + apiVersion: + description: Version of the + schema the FieldPath is written + in terms of, defaults to "v1". + type: string + fieldPath: + description: Path of the field + to select in the specified + API version. + type: string + required: + - fieldPath + type: object + x-kubernetes-map-type: atomic + mode: + description: |- + Optional: mode bits used to set permissions on this file, must be an octal value + between 0000 and 0777 or a decimal valu + format: int32 + type: integer + path: + description: 'Required: Path is the + relative path name of the file + to be created. Must not be absolute + or contain the ''..'' path.' 
+ type: string + resourceFieldRef: + description: |- + Selects a resource of the container: only resources limits and requests + (limits.cpu, limits.memory, requests. + properties: + containerName: + description: 'Container name: + required for volumes, optional + for env vars' + type: string + divisor: + anyOf: + - type: integer + - type: string + description: Specifies the output + format of the exposed resources, + defaults to "1" + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + resource: + description: 'Required: resource + to select' + type: string + required: + - resource + type: object + x-kubernetes-map-type: atomic + required: + - path + type: object + type: array + x-kubernetes-list-type: atomic + type: object + secret: + description: secret information about the + secret data to project + properties: + items: + description: |- + items if unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a + path within a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: + mode bits used to set permissions + on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + optional: + description: optional field specify whether + the Secret or its key must be defined + type: boolean + type: object + x-kubernetes-map-type: atomic + serviceAccountToken: + description: serviceAccountToken is information + about the serviceAccountToken data to project + properties: + audience: + description: audience is the intended + audience of the token. + type: string + expirationSeconds: + description: |- + expirationSeconds is the requested duration of validity of the service + account token. + format: int64 + type: integer + path: + description: |- + path is the path relative to the mount point of the file to project the + token into. + type: string + required: + - path + type: object + type: object + type: array + x-kubernetes-list-type: atomic + type: object + quobyte: + description: quobyte represents a Quobyte mount on the + host that shares a pod's lifetime + properties: + group: + description: |- + group to map volume access to + Default is no group + type: string + readOnly: + description: |- + readOnly here will force the Quobyte volume to be mounted with read-only permissions. + Defaults to false. + type: boolean + registry: + description: |- + registry represents a single or multiple Quobyte Registry services + specified as a string as host:port pair (multiple ent + type: string + tenant: + description: |- + tenant owning the given Quobyte volume in the Backend + Used with dynamically provisioned Quobyte volumes, value is set by + type: string + user: + description: |- + user to map volume access to + Defaults to serivceaccount user + type: string + volume: + description: volume is a string that references + an already created Quobyte volume by name. 
+ type: string + required: + - registry + - volume + type: object + rbd: + description: |- + rbd represents a Rados Block Device mount on the host that shares a pod's lifetime. + More info: https://examples.k8s. + properties: + fsType: + description: fsType is the filesystem type of the + volume that you want to mount. + type: string + image: + description: |- + image is the rados image name. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + keyring: + description: |- + keyring is the path to key ring for RBDUser. + Default is /etc/ceph/keyring. + More info: https://examples.k8s. + type: string + monitors: + description: |- + monitors is a collection of Ceph monitors. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + items: + type: string + type: array + x-kubernetes-list-type: atomic + pool: + description: |- + pool is the rados pool name. + Default is rbd. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + readOnly: + description: |- + readOnly here will force the ReadOnly setting in VolumeMounts. + Defaults to false. + More info: https://examples.k8s. + type: boolean + secretRef: + description: |- + secretRef is name of the authentication secret for RBDUser. If provided + overrides keyring. + Default is nil. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + user: + description: |- + user is the rados user name. + Default is admin. + More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + type: string + required: + - image + - monitors + type: object + scaleIO: + description: scaleIO represents a ScaleIO persistent + volume attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + gateway: + description: gateway is the host address of the + ScaleIO API Gateway. + type: string + protectionDomain: + description: protectionDomain is the name of the + ScaleIO Protection Domain for the configured storage. + type: string + readOnly: + description: |- + readOnly Defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef references to the secret for ScaleIO user and other + sensitive information. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + sslEnabled: + description: sslEnabled Flag enable/disable SSL + communication with Gateway, default false + type: boolean + storageMode: + description: storageMode indicates whether the storage + for a volume should be ThickProvisioned or ThinProvisioned. + type: string + storagePool: + description: storagePool is the ScaleIO Storage + Pool associated with the protection domain. + type: string + system: + description: system is the name of the storage system + as configured in ScaleIO. + type: string + volumeName: + description: |- + volumeName is the name of a volume already created in the ScaleIO system + that is associated with this volume source. 
+ type: string + required: + - gateway + - secretRef + - system + type: object + secret: + description: |- + secret represents a secret that should populate this volume. + More info: https://kubernetes. + properties: + defaultMode: + description: 'defaultMode is Optional: mode bits + used to set permissions on created files by default.' + format: int32 + type: integer + items: + description: |- + items If unspecified, each key-value pair in the Data field of the referenced + Secret will be projected into the volume a + items: + description: Maps a string key to a path within + a volume. + properties: + key: + description: key is the key to project. + type: string + mode: + description: 'mode is Optional: mode bits + used to set permissions on this file.' + format: int32 + type: integer + path: + description: |- + path is the relative path of the file to map the key to. + May not be an absolute path. + type: string + required: + - key + - path + type: object + type: array + x-kubernetes-list-type: atomic + optional: + description: optional field specify whether the + Secret or its keys must be defined + type: boolean + secretName: + description: |- + secretName is the name of the secret in the pod's namespace to use. + More info: https://kubernetes. + type: string + type: object + storageos: + description: storageOS represents a StorageOS volume + attached and mounted on Kubernetes nodes. + properties: + fsType: + description: |- + fsType is the filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + readOnly: + description: |- + readOnly defaults to false (read/write). ReadOnly here will force + the ReadOnly setting in VolumeMounts. + type: boolean + secretRef: + description: |- + secretRef specifies the secret to use for obtaining the StorageOS API + credentials. + properties: + name: + default: "" + description: |- + Name of the referent. + This field is effectively required, but due to backwards compatibility is + allowed to be empty. + type: string + type: object + x-kubernetes-map-type: atomic + volumeName: + description: |- + volumeName is the human-readable name of the StorageOS volume. Volume + names are only unique within a namespace. + type: string + volumeNamespace: + description: volumeNamespace specifies the scope + of the volume within StorageOS. + type: string + type: object + vsphereVolume: + description: vsphereVolume represents a vSphere volume + attached and mounted on kubelets host machine + properties: + fsType: + description: |- + fsType is filesystem type to mount. + Must be a filesystem type supported by the host operating system. + Ex. + type: string + storagePolicyID: + description: storagePolicyID is the storage Policy + Based Management (SPBM) profile ID associated + with the StoragePolicyName. + type: string + storagePolicyName: + description: storagePolicyName is the storage Policy + Based Management (SPBM) profile name. + type: string + volumePath: + description: volumePath is the path that identifies + vSphere volume vmdk + type: string + required: + - volumePath + type: object + required: + - name + type: object + type: array + type: object + required: + - feastProject + type: object + clientConfigMap: + description: ConfigMap in this namespace containing a client `feature_store.yaml` + for this feast deployment + type: string + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. 
+ properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if . + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: |- + type of condition in CamelCase or in foo.example.com/CamelCase. + --- + Many .condition. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + cronJob: + description: CronJob in this namespace for this feast deployment + type: string + feastVersion: + type: string + phase: + type: string + serviceHostnames: + description: ServiceHostnames defines the service hostnames in the + format of :, e.g. example.svc.cluster.local:80 + properties: + offlineStore: + type: string + onlineStore: + type: string + registry: + type: string + registryRest: + type: string + ui: + type: string + type: object + type: object + type: object + served: true + storage: true + subresources: + status: {} + - additionalPrinterColumns: + - jsonPath: .status.phase + name: Status + type: string + - jsonPath: .metadata.creationTimestamp + name: Age + type: date + deprecated: true + deprecationWarning: v1alpha1 is deprecated and will be removed in a future release. + Please migrate to v1. name: v1alpha1 schema: openAPIV3Schema: @@ -436,6 +8635,13 @@ spec: description: podReplacementPolicy specifies when to create replacement Pods. type: string + podTemplateAnnotations: + additionalProperties: + type: string + description: |- + PodTemplateAnnotations are annotations to be applied to the CronJob's PodTemplate + metadata. + type: object suspend: description: suspend specifies whether the Job controller should create Pods or not. @@ -1034,6 +9240,10 @@ spec: - error - critical type: string + metrics: + description: Metrics exposes Prometheus-compatible metrics + for the Feast server when enabled. + type: boolean nodeSelector: additionalProperties: type: string @@ -1493,6 +9703,10 @@ spec: - error - critical type: string + metrics: + description: Metrics exposes Prometheus-compatible metrics + for the Feast server when enabled. + type: boolean nodeSelector: additionalProperties: type: string @@ -1642,6 +9856,21 @@ spec: description: RegistryFilePersistence configures the file-based persistence for the registry service properties: + cache_mode: + description: |- + CacheMode defines the registry cache update strategy. + Allowed values are "sync" and "thread". + enum: + - none + - sync + - thread + type: string + cache_ttl_seconds: + description: CacheTTLSeconds defines the TTL (in + seconds) for the registry cache. 
+ format: int32 + minimum: 0 + type: integer path: type: string pvc: @@ -1963,6 +10192,10 @@ spec: - error - critical type: string + metrics: + description: Metrics exposes Prometheus-compatible + metrics for the Feast server when enabled. + type: boolean nodeSelector: additionalProperties: type: string @@ -2462,6 +10695,10 @@ spec: - error - critical type: string + metrics: + description: Metrics exposes Prometheus-compatible metrics + for the Feast server when enabled. + type: boolean nodeSelector: additionalProperties: type: string @@ -4420,6 +12657,13 @@ spec: description: podReplacementPolicy specifies when to create replacement Pods. type: string + podTemplateAnnotations: + additionalProperties: + type: string + description: |- + PodTemplateAnnotations are annotations to be applied to the CronJob's PodTemplate + metadata. + type: object suspend: description: suspend specifies whether the Job controller should create Pods or not. @@ -5028,6 +13272,10 @@ spec: - error - critical type: string + metrics: + description: Metrics exposes Prometheus-compatible + metrics for the Feast server when enabled. + type: boolean nodeSelector: additionalProperties: type: string @@ -5495,6 +13743,10 @@ spec: - error - critical type: string + metrics: + description: Metrics exposes Prometheus-compatible + metrics for the Feast server when enabled. + type: boolean nodeSelector: additionalProperties: type: string @@ -5647,6 +13899,21 @@ spec: the file-based persistence for the registry service properties: + cache_mode: + description: |- + CacheMode defines the registry cache update strategy. + Allowed values are "sync" and "thread". + enum: + - none + - sync + - thread + type: string + cache_ttl_seconds: + description: CacheTTLSeconds defines the TTL + (in seconds) for the registry cache. + format: int32 + minimum: 0 + type: integer path: type: string pvc: @@ -5977,6 +14244,10 @@ spec: - error - critical type: string + metrics: + description: Metrics exposes Prometheus-compatible + metrics for the Feast server when enabled. + type: boolean nodeSelector: additionalProperties: type: string @@ -6486,6 +14757,10 @@ spec: - error - critical type: string + metrics: + description: Metrics exposes Prometheus-compatible metrics + for the Feast server when enabled. 
+ type: boolean nodeSelector: additionalProperties: type: string @@ -8133,7 +16408,7 @@ spec: type: object type: object served: true - storage: true + storage: false subresources: status: {} status: diff --git a/infra/feast-operator/config/component_metadata.yaml b/infra/feast-operator/config/component_metadata.yaml index fd776ac19bb..a92c7fa318f 100644 --- a/infra/feast-operator/config/component_metadata.yaml +++ b/infra/feast-operator/config/component_metadata.yaml @@ -1,5 +1,5 @@ # This file is required to configure Feast release information for ODH/RHOAI Operator releases: - name: Feast - version: 0.57.0 + version: 0.58.0 repoUrl: https://github.com/feast-dev/feast diff --git a/infra/feast-operator/config/default/related_image_fs_patch.yaml b/infra/feast-operator/config/default/related_image_fs_patch.yaml index e954af1876e..ecb0b6a73aa 100644 --- a/infra/feast-operator/config/default/related_image_fs_patch.yaml +++ b/infra/feast-operator/config/default/related_image_fs_patch.yaml @@ -2,7 +2,7 @@ path: "/spec/template/spec/containers/0/env/0" value: name: RELATED_IMAGE_FEATURE_SERVER - value: quay.io/feastdev/feature-server:0.57.0 + value: quay.io/feastdev/feature-server:0.58.0 - op: replace path: "/spec/template/spec/containers/0/env/1" value: diff --git a/infra/feast-operator/config/manager/kustomization.yaml b/infra/feast-operator/config/manager/kustomization.yaml index 8b3405fb024..7670c255f67 100644 --- a/infra/feast-operator/config/manager/kustomization.yaml +++ b/infra/feast-operator/config/manager/kustomization.yaml @@ -5,4 +5,4 @@ kind: Kustomization images: - name: controller newName: quay.io/feastdev/feast-operator - newTag: 0.57.0 + newTag: 0.58.0 diff --git a/infra/feast-operator/config/manifests/bases/feast-operator.clusterserviceversion.yaml b/infra/feast-operator/config/manifests/bases/feast-operator.clusterserviceversion.yaml index b9915a296a4..c1ae77619ad 100644 --- a/infra/feast-operator/config/manifests/bases/feast-operator.clusterserviceversion.yaml +++ b/infra/feast-operator/config/manifests/bases/feast-operator.clusterserviceversion.yaml @@ -10,6 +10,11 @@ spec: apiservicedefinitions: {} customresourcedefinitions: owned: + - description: FeatureStore is the Schema for the featurestores API + displayName: Feature Store + kind: FeatureStore + name: featurestores.feast.dev + version: v1 - description: FeatureStore is the Schema for the featurestores API displayName: Feature Store kind: FeatureStore diff --git a/infra/feast-operator/config/overlays/odh/params.env b/infra/feast-operator/config/overlays/odh/params.env index 2e8ed43f3a2..85c5c52168e 100644 --- a/infra/feast-operator/config/overlays/odh/params.env +++ b/infra/feast-operator/config/overlays/odh/params.env @@ -1,3 +1,3 @@ -RELATED_IMAGE_FEAST_OPERATOR=quay.io/feastdev/feast-operator:0.57.0 -RELATED_IMAGE_FEATURE_SERVER=quay.io/feastdev/feature-server:0.57.0 +RELATED_IMAGE_FEAST_OPERATOR=quay.io/feastdev/feast-operator:0.58.0 +RELATED_IMAGE_FEATURE_SERVER=quay.io/feastdev/feature-server:0.58.0 RELATED_IMAGE_CRON_JOB=quay.io/openshift/origin-cli:4.17 diff --git a/infra/feast-operator/config/overlays/rhoai/params.env b/infra/feast-operator/config/overlays/rhoai/params.env index c54ad85aa02..585e02cc99f 100644 --- a/infra/feast-operator/config/overlays/rhoai/params.env +++ b/infra/feast-operator/config/overlays/rhoai/params.env @@ -1,3 +1,3 @@ -RELATED_IMAGE_FEAST_OPERATOR=quay.io/feastdev/feast-operator:0.57.0 -RELATED_IMAGE_FEATURE_SERVER=quay.io/feastdev/feature-server:0.57.0 
+RELATED_IMAGE_FEAST_OPERATOR=quay.io/feastdev/feast-operator:0.58.0 +RELATED_IMAGE_FEATURE_SERVER=quay.io/feastdev/feature-server:0.58.0 RELATED_IMAGE_CRON_JOB=registry.redhat.io/openshift4/ose-cli@sha256:bc35a9fc663baf0d6493cc57e89e77a240a36c43cf38fb78d8e61d3b87cf5cc5 \ No newline at end of file diff --git a/infra/feast-operator/dist/install.yaml b/infra/feast-operator/dist/install.yaml index 31069afe147..57e32090b19 100644 --- a/infra/feast-operator/dist/install.yaml +++ b/infra/feast-operator/dist/install.yaml @@ -16768,10 +16768,10 @@ spec: - /manager env: - name: RELATED_IMAGE_FEATURE_SERVER - value: quay.io/feastdev/feature-server:0.57.0 + value: quay.io/feastdev/feature-server:0.58.0 - name: RELATED_IMAGE_CRON_JOB value: quay.io/openshift/origin-cli:4.17 - image: quay.io/feastdev/feast-operator:0.57.0 + image: quay.io/feastdev/feast-operator:0.58.0 livenessProbe: httpGet: path: /healthz diff --git a/infra/feast-operator/dist/operator-e2e-tests b/infra/feast-operator/dist/operator-e2e-tests index 0734ab81a9e..2e725f24970 100755 Binary files a/infra/feast-operator/dist/operator-e2e-tests and b/infra/feast-operator/dist/operator-e2e-tests differ diff --git a/infra/feast-operator/internal/controller/authz/authz.go b/infra/feast-operator/internal/controller/authz/authz.go index 33a3594607f..9cb5b7c9554 100644 --- a/infra/feast-operator/internal/controller/authz/authz.go +++ b/infra/feast-operator/internal/controller/authz/authz.go @@ -4,7 +4,7 @@ import ( "context" "slices" - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" rbacv1 "k8s.io/api/rbac/v1" apimeta "k8s.io/apimachinery/pkg/api/meta" @@ -337,7 +337,7 @@ func (authz *FeastAuthorization) getFeastRoleName() string { return GetFeastRoleName(authz.Handler.FeatureStore) } -func GetFeastRoleName(featureStore *feastdevv1alpha1.FeatureStore) string { +func GetFeastRoleName(featureStore *feastdevv1.FeatureStore) string { return services.GetFeastName(featureStore) } @@ -345,7 +345,7 @@ func (authz *FeastAuthorization) getFeastClusterRoleName() string { return GetFeastClusterRoleName(authz.Handler.FeatureStore) } -func GetFeastClusterRoleName(featureStore *feastdevv1alpha1.FeatureStore) string { +func GetFeastClusterRoleName(featureStore *feastdevv1.FeatureStore) string { // Use a shared ClusterRole name for all Feast instances // This allows multiple FeatureStores to share the same Token Access Review permissions return "feast-token-review-cluster-role" @@ -355,7 +355,7 @@ func (authz *FeastAuthorization) getFeastClusterRoleBindingName() string { return GetFeastClusterRoleBindingName(authz.Handler.FeatureStore) } -func GetFeastClusterRoleBindingName(featureStore *feastdevv1alpha1.FeatureStore) string { +func GetFeastClusterRoleBindingName(featureStore *feastdevv1.FeatureStore) string { return services.GetFeastName(featureStore) + "-cluster-binding" } diff --git a/infra/feast-operator/internal/controller/authz/authz_types.go b/infra/feast-operator/internal/controller/authz/authz_types.go index f955f5b40f1..aea5e5f7a65 100644 --- a/infra/feast-operator/internal/controller/authz/authz_types.go +++ b/infra/feast-operator/internal/controller/authz/authz_types.go @@ -1,7 +1,7 @@ package authz import ( - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" 
"github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -14,15 +14,15 @@ type FeastAuthorization struct { var ( feastKubernetesAuthConditions = map[metav1.ConditionStatus]metav1.Condition{ metav1.ConditionTrue: { - Type: feastdevv1alpha1.AuthorizationReadyType, + Type: feastdevv1.AuthorizationReadyType, Status: metav1.ConditionTrue, - Reason: feastdevv1alpha1.ReadyReason, - Message: feastdevv1alpha1.KubernetesAuthzReadyMessage, + Reason: feastdevv1.ReadyReason, + Message: feastdevv1.KubernetesAuthzReadyMessage, }, metav1.ConditionFalse: { - Type: feastdevv1alpha1.AuthorizationReadyType, + Type: feastdevv1.AuthorizationReadyType, Status: metav1.ConditionFalse, - Reason: feastdevv1alpha1.KubernetesAuthzFailedReason, + Reason: feastdevv1.KubernetesAuthzFailedReason, }, } ) diff --git a/infra/feast-operator/internal/controller/featurestore_controller.go b/infra/feast-operator/internal/controller/featurestore_controller.go index 55bfb532f66..a9591d97c8a 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller.go +++ b/infra/feast-operator/internal/controller/featurestore_controller.go @@ -18,7 +18,6 @@ package controller import ( "context" - "encoding/json" "reflect" "time" @@ -38,7 +37,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" "github.com/feast-dev/feast/infra/feast-operator/internal/controller/authz" feasthandler "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" @@ -68,37 +66,6 @@ type FeatureStoreReconciler struct { // +kubebuilder:rbac:groups=route.openshift.io,resources=routes,verbs=get;list;create;update;watch;delete // +kubebuilder:rbac:groups=batch,resources=cronjobs,verbs=get;list;watch;create;update;patch;delete -// convertV1ToV1Alpha1 converts a v1 FeatureStore to v1alpha1 for internal use -// Since both types have identical structures, we use JSON marshaling/unmarshaling -func convertV1ToV1Alpha1(v1Obj *feastdevv1.FeatureStore) *feastdevv1alpha1.FeatureStore { - // Use JSON marshaling/unmarshaling since both types have identical JSON structure - v1alpha1Obj := &feastdevv1alpha1.FeatureStore{ - ObjectMeta: v1Obj.ObjectMeta, - } - - // Copy spec and status using JSON as intermediate format - specData, err := json.Marshal(v1Obj.Spec) - if err != nil { - // If marshaling fails, return object with just metadata - return v1alpha1Obj - } - if err := json.Unmarshal(specData, &v1alpha1Obj.Spec); err != nil { - // If unmarshaling fails, return object with just metadata - return v1alpha1Obj - } - statusData, err := json.Marshal(v1Obj.Status) - if err != nil { - // If marshaling fails, return object with spec but no status - return v1alpha1Obj - } - if err := json.Unmarshal(statusData, &v1alpha1Obj.Status); err != nil { - // If unmarshaling fails, return object with spec but no status - return v1alpha1Obj - } - - return v1alpha1Obj -} - // Reconcile is part of the main kubernetes reconciliation loop which aims to // move the current state of the cluster closer to the desired state. 
// For more details, check Reconcile and its Result here: @@ -106,17 +73,15 @@ func convertV1ToV1Alpha1(v1Obj *feastdevv1.FeatureStore) *feastdevv1alpha1.Featu func (r *FeatureStoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, recErr error) { logger := log.FromContext(ctx) - // Try to get as v1 first (storage version), then fall back to v1alpha1 - var cr *feastdevv1alpha1.FeatureStore - var originalV1Obj *feastdevv1.FeatureStore - v1Obj := &feastdevv1.FeatureStore{} - err := r.Get(ctx, req.NamespacedName, v1Obj) + // Get the FeatureStore using v1 (storage version) + cr := &feastdevv1.FeatureStore{} + err := r.Get(ctx, req.NamespacedName, cr) if err != nil { if apierrors.IsNotFound(err) { // CR deleted since request queued, child objects getting GC'd, no requeue logger.V(1).Info("FeatureStore CR not found, has been deleted") // Clean up namespace registry entry even if the CR is not found - if err := r.cleanupNamespaceRegistry(ctx, &feastdevv1alpha1.FeatureStore{ + if err := r.cleanupNamespaceRegistry(ctx, &feastdevv1.FeatureStore{ ObjectMeta: metav1.ObjectMeta{ Name: req.NamespacedName.Name, Namespace: req.NamespacedName.Namespace, @@ -127,33 +92,8 @@ func (r *FeatureStoreReconciler) Reconcile(ctx context.Context, req ctrl.Request } return ctrl.Result{}, nil } - // Try v1alpha1 if v1 fails - v1alpha1Obj := &feastdevv1alpha1.FeatureStore{} - err = r.Get(ctx, req.NamespacedName, v1alpha1Obj) - if err != nil { - if apierrors.IsNotFound(err) { - // CR deleted since request queued, child objects getting GC'd, no requeue - logger.V(1).Info("FeatureStore CR not found, has been deleted") - // Clean up namespace registry entry even if the CR is not found - if err := r.cleanupNamespaceRegistry(ctx, &feastdevv1alpha1.FeatureStore{ - ObjectMeta: metav1.ObjectMeta{ - Name: req.NamespacedName.Name, - Namespace: req.NamespacedName.Namespace, - }, - }); err != nil { - logger.Error(err, "Failed to clean up namespace registry entry for deleted FeatureStore") - // Don't return error here as the CR is already deleted - } - return ctrl.Result{}, nil - } - logger.Error(err, "Unable to get FeatureStore CR") - return ctrl.Result{}, err - } - cr = v1alpha1Obj - } else { - // Convert v1 to v1alpha1 for internal use - originalV1Obj = v1Obj - cr = convertV1ToV1Alpha1(v1Obj) + logger.Error(err, "Unable to get FeatureStore CR") + return ctrl.Result{}, err } currentStatus := cr.Status.DeepCopy() @@ -169,27 +109,7 @@ func (r *FeatureStoreReconciler) Reconcile(ctx context.Context, req ctrl.Request result, recErr = r.deployFeast(ctx, cr) if cr.DeletionTimestamp == nil && !reflect.DeepEqual(currentStatus, cr.Status) { - // Update status - need to update in the original version (v1 if it was v1, v1alpha1 if it was v1alpha1) - var statusObj client.Object - if originalV1Obj != nil { - // Convert back to v1 for status update - originalV1Obj.Status = feastdevv1.FeatureStoreStatus{} - statusData, err := json.Marshal(cr.Status) - if err != nil { - logger.Error(err, "Failed to marshal status for v1 conversion") - statusObj = cr - } else { - if err := json.Unmarshal(statusData, &originalV1Obj.Status); err != nil { - logger.Error(err, "Failed to unmarshal status for v1 conversion") - statusObj = cr - } else { - statusObj = originalV1Obj - } - } - } else { - statusObj = cr - } - if err = r.Client.Status().Update(ctx, statusObj); err != nil { + if err = r.Client.Status().Update(ctx, cr); err != nil { if apierrors.IsConflict(err) { logger.Info("FeatureStore object modified, retry syncing status") // 
Re-queue and preserve existing recErr @@ -222,13 +142,13 @@ func (r *FeatureStoreReconciler) Reconcile(ctx context.Context, req ctrl.Request return result, recErr } -func (r *FeatureStoreReconciler) deployFeast(ctx context.Context, cr *feastdevv1alpha1.FeatureStore) (result ctrl.Result, err error) { +func (r *FeatureStoreReconciler) deployFeast(ctx context.Context, cr *feastdevv1.FeatureStore) (result ctrl.Result, err error) { logger := log.FromContext(ctx) condition := metav1.Condition{ - Type: feastdevv1alpha1.ReadyType, + Type: feastdevv1.ReadyType, Status: metav1.ConditionTrue, - Reason: feastdevv1alpha1.ReadyReason, - Message: feastdevv1alpha1.ReadyMessage, + Reason: feastdevv1.ReadyReason, + Message: feastdevv1.ReadyMessage, } feast := services.FeastServices{ Handler: feasthandler.FeastHandler{ @@ -253,19 +173,19 @@ func (r *FeatureStoreReconciler) deployFeast(ctx context.Context, cr *feastdevv1 } if err != nil { condition = metav1.Condition{ - Type: feastdevv1alpha1.ReadyType, + Type: feastdevv1.ReadyType, Status: metav1.ConditionFalse, - Reason: feastdevv1alpha1.FailedReason, + Reason: feastdevv1.FailedReason, Message: "Error: " + err.Error(), } } else { deployment, deploymentErr := feast.GetDeployment() if deploymentErr != nil { condition = metav1.Condition{ - Type: feastdevv1alpha1.ReadyType, + Type: feastdevv1.ReadyType, Status: metav1.ConditionUnknown, - Reason: feastdevv1alpha1.DeploymentNotAvailableReason, - Message: feastdevv1alpha1.DeploymentNotAvailableMessage, + Reason: feastdevv1.DeploymentNotAvailableReason, + Message: feastdevv1.DeploymentNotAvailableMessage, } result = errResult @@ -273,10 +193,10 @@ func (r *FeatureStoreReconciler) deployFeast(ctx context.Context, cr *feastdevv1 isDeployAvailable := services.IsDeploymentAvailable(deployment.Status.Conditions) if !isDeployAvailable { condition = metav1.Condition{ - Type: feastdevv1alpha1.ReadyType, + Type: feastdevv1.ReadyType, Status: metav1.ConditionUnknown, - Reason: feastdevv1alpha1.DeploymentNotAvailableReason, - Message: feastdevv1alpha1.DeploymentNotAvailableMessage, + Reason: feastdevv1.DeploymentNotAvailableReason, + Message: feastdevv1.DeploymentNotAvailableMessage, } result = errResult @@ -286,12 +206,12 @@ func (r *FeatureStoreReconciler) deployFeast(ctx context.Context, cr *feastdevv1 logger.Info(condition.Message) apimeta.SetStatusCondition(&cr.Status.Conditions, condition) - if apimeta.IsStatusConditionTrue(cr.Status.Conditions, feastdevv1alpha1.ReadyType) { - cr.Status.Phase = feastdevv1alpha1.ReadyPhase - } else if apimeta.IsStatusConditionFalse(cr.Status.Conditions, feastdevv1alpha1.ReadyType) { - cr.Status.Phase = feastdevv1alpha1.FailedPhase + if apimeta.IsStatusConditionTrue(cr.Status.Conditions, feastdevv1.ReadyType) { + cr.Status.Phase = feastdevv1.ReadyPhase + } else if apimeta.IsStatusConditionFalse(cr.Status.Conditions, feastdevv1.ReadyType) { + cr.Status.Phase = feastdevv1.FailedPhase } else { - cr.Status.Phase = feastdevv1alpha1.PendingPhase + cr.Status.Phase = feastdevv1.PendingPhase } return result, err @@ -309,11 +229,7 @@ func (r *FeatureStoreReconciler) SetupWithManager(mgr ctrl.Manager) error { Owns(&rbacv1.RoleBinding{}). Owns(&rbacv1.Role{}). Owns(&batchv1.CronJob{}). - Watches(&feastdevv1.FeatureStore{}, handler.EnqueueRequestsFromMapFunc(r.mapFeastRefsToFeastRequests)). 
- Watches(&feastdevv1alpha1.FeatureStore{}, handler.EnqueueRequestsFromMapFunc(r.mapFeastRefsToFeastRequests)) - - // Also watch v1alpha1 for backwards compatibility - bldr = bldr.Watches(&feastdevv1alpha1.FeatureStore{}, &handler.EnqueueRequestForObject{}) + Watches(&feastdevv1.FeatureStore{}, handler.EnqueueRequestsFromMapFunc(r.mapFeastRefsToFeastRequests)) if services.IsOpenShift() { bldr = bldr.Owns(&routev1.Route{}) @@ -324,7 +240,7 @@ func (r *FeatureStoreReconciler) SetupWithManager(mgr ctrl.Manager) error { } // cleanupNamespaceRegistry removes the feature store instance from the namespace registry -func (r *FeatureStoreReconciler) cleanupNamespaceRegistry(ctx context.Context, cr *feastdevv1alpha1.FeatureStore) error { +func (r *FeatureStoreReconciler) cleanupNamespaceRegistry(ctx context.Context, cr *feastdevv1.FeatureStore) error { feast := services.FeastServices{ Handler: feasthandler.FeastHandler{ Client: r.Client, @@ -341,20 +257,14 @@ func (r *FeatureStoreReconciler) cleanupNamespaceRegistry(ctx context.Context, c func (r *FeatureStoreReconciler) mapFeastRefsToFeastRequests(ctx context.Context, object client.Object) []reconcile.Request { logger := log.FromContext(ctx) - // Handle both v1 and v1alpha1 versions - var feastRef *feastdevv1alpha1.FeatureStore - switch obj := object.(type) { - case *feastdevv1.FeatureStore: - feastRef = convertV1ToV1Alpha1(obj) - case *feastdevv1alpha1.FeatureStore: - feastRef = obj - default: + feastRef, ok := object.(*feastdevv1.FeatureStore) + if !ok { logger.Error(nil, "Unexpected object type in mapFeastRefsToFeastRequests") return nil } // list all FeatureStores in the cluster - var feastList feastdevv1alpha1.FeatureStoreList + var feastList feastdevv1.FeatureStoreList if err := r.List(ctx, &feastList, client.InNamespace("")); err != nil { logger.Error(err, "could not list FeatureStores. 
"+ "FeatureStores affected by changes to the referenced FeatureStore object will not be reconciled.") diff --git a/infra/feast-operator/internal/controller/featurestore_controller_cronjob_test.go b/infra/feast-operator/internal/controller/featurestore_controller_cronjob_test.go index f3827136342..c329d70f06d 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_cronjob_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_cronjob_test.go @@ -90,7 +90,7 @@ var _ = Describe("FeatureStore Controller - Feast CronJob", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } @@ -210,7 +210,7 @@ var _ = Describe("FeatureStore Controller - Feast CronJob", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } diff --git a/infra/feast-operator/internal/controller/featurestore_controller_db_store_test.go b/infra/feast-operator/internal/controller/featurestore_controller_db_store_test.go index 159b836bc1c..d17bffb2377 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_db_store_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_db_store_test.go @@ -352,7 +352,7 @@ var _ = Describe("FeatureStore Controller - db storage services", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } Expect(resource.Status).NotTo(BeNil()) @@ -545,7 +545,7 @@ var _ = Describe("FeatureStore Controller - db storage services", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } @@ -691,7 +691,7 @@ var _ = Describe("FeatureStore Controller - db storage services", func() { resource = &feastdevv1.FeatureStore{} err = k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - feast.Handler.FeatureStore = convertV1ToV1Alpha1ForTests(resource) + feast.Handler.FeatureStore = resource // check online config err = k8sClient.Get(ctx, types.NamespacedName{ diff --git a/infra/feast-operator/internal/controller/featurestore_controller_ephemeral_test.go b/infra/feast-operator/internal/controller/featurestore_controller_ephemeral_test.go index e048684bcda..212fa80228b 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_ephemeral_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_ephemeral_test.go @@ -119,7 +119,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } Expect(resource.Status).NotTo(BeNil()) @@ -260,7 +260,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } @@ -392,7 +392,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { resource = &feastdevv1.FeatureStore{} err = k8sClient.Get(ctx, typeNamespacedName, resource) 
Expect(err).NotTo(HaveOccurred()) - feast.Handler.FeatureStore = convertV1ToV1Alpha1ForTests(resource) + feast.Handler.FeatureStore = resource // check registry err = k8sClient.Get(ctx, types.NamespacedName{ diff --git a/infra/feast-operator/internal/controller/featurestore_controller_kubernetes_auth_test.go b/infra/feast-operator/internal/controller/featurestore_controller_kubernetes_auth_test.go index 90dfd4f11ca..3bfab485e85 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_kubernetes_auth_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_kubernetes_auth_test.go @@ -104,7 +104,7 @@ var _ = Describe("FeatureStore Controller-Kubernetes authorization", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } Expect(resource.Status).NotTo(BeNil()) @@ -221,7 +221,7 @@ var _ = Describe("FeatureStore Controller-Kubernetes authorization", func() { // check Feast Role feastRole := &rbacv1.Role{} err = k8sClient.Get(ctx, types.NamespacedName{ - Name: authz.GetFeastRoleName(convertV1ToV1Alpha1ForTests(resource)), + Name: authz.GetFeastRoleName(resource), Namespace: resource.Namespace, }, feastRole) @@ -241,7 +241,7 @@ var _ = Describe("FeatureStore Controller-Kubernetes authorization", func() { // check RoleBinding roleBinding := &rbacv1.RoleBinding{} err = k8sClient.Get(ctx, types.NamespacedName{ - Name: authz.GetFeastRoleName(convertV1ToV1Alpha1ForTests(resource)), + Name: authz.GetFeastRoleName(resource), Namespace: resource.Namespace, }, roleBinding) @@ -283,7 +283,7 @@ var _ = Describe("FeatureStore Controller-Kubernetes authorization", func() { resource = &feastdevv1.FeatureStore{} err = k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - feast.Handler.FeatureStore = convertV1ToV1Alpha1ForTests(resource) + feast.Handler.FeatureStore = resource // check new Roles for _, roleName := range rolesNew { @@ -321,7 +321,7 @@ var _ = Describe("FeatureStore Controller-Kubernetes authorization", func() { resource = &feastdevv1.FeatureStore{} err = k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - feast.Handler.FeatureStore = convertV1ToV1Alpha1ForTests(resource) + feast.Handler.FeatureStore = resource // check no Roles for _, roleName := range roles { @@ -337,7 +337,7 @@ var _ = Describe("FeatureStore Controller-Kubernetes authorization", func() { // check no RoleBinding roleBinding = &rbacv1.RoleBinding{} err = k8sClient.Get(ctx, types.NamespacedName{ - Name: authz.GetFeastRoleName(convertV1ToV1Alpha1ForTests(resource)), + Name: authz.GetFeastRoleName(resource), Namespace: resource.Namespace, }, roleBinding) @@ -385,7 +385,7 @@ var _ = Describe("FeatureStore Controller-Kubernetes authorization", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } diff --git a/infra/feast-operator/internal/controller/featurestore_controller_loglevel_test.go b/infra/feast-operator/internal/controller/featurestore_controller_loglevel_test.go index 0316a9ba885..948ddec210d 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_loglevel_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_loglevel_test.go @@ -116,7 +116,7 @@ var _ = Describe("FeatureStore Controller - Feast service LogLevel", func() 
{ Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } @@ -223,7 +223,7 @@ var _ = Describe("FeatureStore Controller - Feast service LogLevel", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } diff --git a/infra/feast-operator/internal/controller/featurestore_controller_objectstore_test.go b/infra/feast-operator/internal/controller/featurestore_controller_objectstore_test.go index 1baa061f998..37d22094147 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_objectstore_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_objectstore_test.go @@ -114,7 +114,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } Expect(resource.Status).NotTo(BeNil()) @@ -211,7 +211,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { resource = &feastdevv1.FeatureStore{} err = k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - feast.Handler.FeatureStore = convertV1ToV1Alpha1ForTests(resource) + feast.Handler.FeatureStore = resource Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.S3AdditionalKwargs).NotTo(BeNil()) Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.S3AdditionalKwargs).NotTo(Equal(&s3AdditionalKwargs)) Expect(resource.Status.Applied.Services.Registry.Local.Persistence.FilePersistence.S3AdditionalKwargs).To(Equal(&newS3AdditionalKwargs)) @@ -267,7 +267,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } @@ -345,7 +345,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { resource = &feastdevv1.FeatureStore{} err = k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - feast.Handler.FeatureStore = convertV1ToV1Alpha1ForTests(resource) + feast.Handler.FeatureStore = resource // check registry config err = k8sClient.Get(ctx, types.NamespacedName{ @@ -402,7 +402,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } diff --git a/infra/feast-operator/internal/controller/featurestore_controller_oidc_auth_test.go b/infra/feast-operator/internal/controller/featurestore_controller_oidc_auth_test.go index 82cdec6b7bd..82a7ebeb4ad 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_oidc_auth_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_oidc_auth_test.go @@ -125,7 +125,7 @@ var _ = Describe("FeatureStore Controller-OIDC authorization", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } Expect(resource.Status).NotTo(BeNil()) @@ -234,7 +234,7 @@ var _ = 
Describe("FeatureStore Controller-OIDC authorization", func() { // check Feast Role feastRole := &rbacv1.Role{} err = k8sClient.Get(ctx, types.NamespacedName{ - Name: authz.GetFeastRoleName(convertV1ToV1Alpha1ForTests(resource)), + Name: authz.GetFeastRoleName(resource), Namespace: resource.Namespace, }, feastRole) @@ -244,7 +244,7 @@ var _ = Describe("FeatureStore Controller-OIDC authorization", func() { // check RoleBinding roleBinding := &rbacv1.RoleBinding{} err = k8sClient.Get(ctx, types.NamespacedName{ - Name: authz.GetFeastRoleName(convertV1ToV1Alpha1ForTests(resource)), + Name: authz.GetFeastRoleName(resource), Namespace: resource.Namespace, }, roleBinding) @@ -273,12 +273,12 @@ var _ = Describe("FeatureStore Controller-OIDC authorization", func() { resource = &feastdevv1.FeatureStore{} err = k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - feast.Handler.FeatureStore = convertV1ToV1Alpha1ForTests(resource) + feast.Handler.FeatureStore = resource // check no RoleBinding roleBinding = &rbacv1.RoleBinding{} err = k8sClient.Get(ctx, types.NamespacedName{ - Name: authz.GetFeastRoleName(convertV1ToV1Alpha1ForTests(resource)), + Name: authz.GetFeastRoleName(resource), Namespace: resource.Namespace, }, roleBinding) @@ -326,7 +326,7 @@ var _ = Describe("FeatureStore Controller-OIDC authorization", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } diff --git a/infra/feast-operator/internal/controller/featurestore_controller_pvc_test.go b/infra/feast-operator/internal/controller/featurestore_controller_pvc_test.go index f1e33761296..8e7303cee34 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_pvc_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_pvc_test.go @@ -153,7 +153,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } Expect(resource.Status).NotTo(BeNil()) @@ -386,7 +386,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { resource = &feastdevv1.FeatureStore{} err = k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - feast.Handler.FeatureStore = convertV1ToV1Alpha1ForTests(resource) + feast.Handler.FeatureStore = resource Expect(resource.Status.Applied.Services.OnlineStore.Persistence.FilePersistence.PvcConfig).To(BeNil()) // check online deployment/container @@ -466,7 +466,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } @@ -604,7 +604,7 @@ var _ = Describe("FeatureStore Controller-Ephemeral services", func() { resource = &feastdevv1.FeatureStore{} err = k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err).NotTo(HaveOccurred()) - feast.Handler.FeatureStore = convertV1ToV1Alpha1ForTests(resource) + feast.Handler.FeatureStore = resource // check registry config deploy = &appsv1.Deployment{} diff --git a/infra/feast-operator/internal/controller/featurestore_controller_test.go b/infra/feast-operator/internal/controller/featurestore_controller_test.go index f65b9c58f32..bfd4a484cff 100644 --- 
a/infra/feast-operator/internal/controller/featurestore_controller_test.go +++ b/infra/feast-operator/internal/controller/featurestore_controller_test.go @@ -130,7 +130,7 @@ var _ = Describe("FeatureStore Controller", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } @@ -319,7 +319,7 @@ var _ = Describe("FeatureStore Controller", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } @@ -394,7 +394,7 @@ var _ = Describe("FeatureStore Controller", func() { Expect(err).NotTo(HaveOccurred()) // Update feast object with the refreshed resource - feast.Handler.FeatureStore = convertV1ToV1Alpha1ForTests(resource) + feast.Handler.FeatureStore = resource testConfig.Project = resourceNew.Spec.FeastProject Expect(deploy.Spec.Strategy.Type).To(Equal(appsv1.RollingUpdateDeploymentStrategyType)) @@ -434,7 +434,7 @@ var _ = Describe("FeatureStore Controller", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } @@ -565,7 +565,7 @@ var _ = Describe("FeatureStore Controller", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } Expect(resource.Status).NotTo(BeNil()) @@ -724,7 +724,7 @@ var _ = Describe("FeatureStore Controller", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } @@ -854,7 +854,7 @@ var _ = Describe("FeatureStore Controller", func() { Expect(err).NotTo(HaveOccurred()) // Update feast object with the refreshed resource - feast.Handler.FeatureStore = convertV1ToV1Alpha1ForTests(resource) + feast.Handler.FeatureStore = resource testConfig.Project = resourceNew.Spec.FeastProject Expect(deploy.Spec.Template.Spec.Containers[0].Env).To(HaveLen(1)) @@ -912,7 +912,7 @@ var _ = Describe("FeatureStore Controller", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } @@ -1124,7 +1124,7 @@ var _ = Describe("FeatureStore Controller", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } @@ -1305,7 +1305,7 @@ var _ = Describe("FeatureStore Controller", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } @@ -1382,7 +1382,7 @@ var _ = Describe("FeatureStore Controller", func() { Client: controllerReconciler.Client, Context: ctx, Scheme: controllerReconciler.Scheme, - FeatureStore: convertV1ToV1Alpha1ForTests(resource), + FeatureStore: resource, }, } diff --git a/infra/feast-operator/internal/controller/featurestore_controller_test_utils_test.go b/infra/feast-operator/internal/controller/featurestore_controller_test_utils_test.go index 65d4196f200..cadb1ac13e6 100644 --- a/infra/feast-operator/internal/controller/featurestore_controller_test_utils_test.go +++ 
b/infra/feast-operator/internal/controller/featurestore_controller_test_utils_test.go
@@ -2,12 +2,10 @@ package controller
 
 import (
 	"context"
-	"encoding/json"
 
 	. "github.com/onsi/ginkgo/v2"
 
 	feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1"
-	feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	. "github.com/onsi/gomega"
@@ -171,28 +169,3 @@ func withEnvFrom() *[]corev1.EnvFromSource {
 	}
 }
 
-
-// convertV1ToV1Alpha1ForTests converts a v1 FeatureStore to v1alpha1 for use with services package
-// This is needed because the services package still uses v1alpha1 internally
-func convertV1ToV1Alpha1ForTests(v1Obj *feastdevv1.FeatureStore) *feastdevv1alpha1.FeatureStore {
-	v1alpha1Obj := &feastdevv1alpha1.FeatureStore{
-		ObjectMeta: v1Obj.ObjectMeta,
-	}
-
-	specData, err := json.Marshal(v1Obj.Spec)
-	if err != nil {
-		return v1alpha1Obj
-	}
-	if err := json.Unmarshal(specData, &v1alpha1Obj.Spec); err != nil {
-		return v1alpha1Obj
-	}
-	statusData, err := json.Marshal(v1Obj.Status)
-	if err != nil {
-		return v1alpha1Obj
-	}
-	if err := json.Unmarshal(statusData, &v1alpha1Obj.Status); err != nil {
-		return v1alpha1Obj
-	}
-
-	return v1alpha1Obj
-}
diff --git a/infra/feast-operator/internal/controller/featurestore_controller_tls_test.go b/infra/feast-operator/internal/controller/featurestore_controller_tls_test.go
index 666b745fe40..0af097120ce 100644
--- a/infra/feast-operator/internal/controller/featurestore_controller_tls_test.go
+++ b/infra/feast-operator/internal/controller/featurestore_controller_tls_test.go
@@ -127,7 +127,7 @@ var _ = Describe("FeatureStore Controller - Feast service TLS", func() {
 				Client:       controllerReconciler.Client,
 				Context:      ctx,
 				Scheme:       controllerReconciler.Scheme,
-				FeatureStore: convertV1ToV1Alpha1ForTests(resource),
+				FeatureStore: resource,
 			},
 		}
 
@@ -224,7 +224,7 @@ var _ = Describe("FeatureStore Controller - Feast service TLS", func() {
 				Client:       controllerReconciler.Client,
 				Context:      ctx,
 				Scheme:       controllerReconciler.Scheme,
-				FeatureStore: convertV1ToV1Alpha1ForTests(resource),
+				FeatureStore: resource,
 			},
 		}
 
@@ -390,7 +390,7 @@ var _ = Describe("FeatureStore Controller - Feast service TLS", func() {
 			resource = &feastdevv1.FeatureStore{}
 			err = k8sClient.Get(ctx, typeNamespacedName, resource)
 			Expect(err).NotTo(HaveOccurred())
-			feast.Handler.FeatureStore = convertV1ToV1Alpha1ForTests(resource)
+			feast.Handler.FeatureStore = resource
 
 			// check registry
 			deploy = &appsv1.Deployment{}
@@ -511,7 +511,7 @@ var _ = Describe("Test mountCustomCABundle functionality", func() {
 				Client:       controllerReconciler.Client,
 				Context:      ctx,
 				Scheme:       controllerReconciler.Scheme,
-				FeatureStore: convertV1ToV1Alpha1ForTests(resource),
+				FeatureStore: resource,
 			},
 		}
 
@@ -554,7 +554,7 @@ var _ = Describe("Test mountCustomCABundle functionality", func() {
 				Client:       controllerReconciler.Client,
 				Context:      ctx,
 				Scheme:       controllerReconciler.Scheme,
-				FeatureStore: convertV1ToV1Alpha1ForTests(resource),
+				FeatureStore: resource,
 			},
 		}
 
diff --git a/infra/feast-operator/internal/controller/featurestore_controller_volume_volumemount_test.go b/infra/feast-operator/internal/controller/featurestore_controller_volume_volumemount_test.go
index d7aac6bf507..5751227e6ad 100644
--- a/infra/feast-operator/internal/controller/featurestore_controller_volume_volumemount_test.go
+++ b/infra/feast-operator/internal/controller/featurestore_controller_volume_volumemount_test.go
@@ -108,7 +108,7 @@ var _ = Describe("FeatureStore Controller - Deployment Volumes and VolumeMounts"
 				Client:       controllerReconciler.Client,
 				Context:      ctx,
 				Scheme:       controllerReconciler.Scheme,
-				FeatureStore: convertV1ToV1Alpha1ForTests(resource),
+				FeatureStore: resource,
 			},
 		}
 
diff --git a/infra/feast-operator/internal/controller/handler/handler_types.go b/infra/feast-operator/internal/controller/handler/handler_types.go
index 5a26776f569..d6a66c55ffd 100644
--- a/infra/feast-operator/internal/controller/handler/handler_types.go
+++ b/infra/feast-operator/internal/controller/handler/handler_types.go
@@ -3,7 +3,7 @@ package handler
 import (
 	"context"
 
-	feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1"
+	feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
@@ -16,5 +16,5 @@ type FeastHandler struct {
 	client.Client
 	Context      context.Context
 	Scheme       *runtime.Scheme
-	FeatureStore *feastdevv1alpha1.FeatureStore
+	FeatureStore *feastdevv1.FeatureStore
 }
diff --git a/infra/feast-operator/internal/controller/services/cronjob.go b/infra/feast-operator/internal/controller/services/cronjob.go
index 5ac017468ab..f3b978928f7 100644
--- a/infra/feast-operator/internal/controller/services/cronjob.go
+++ b/infra/feast-operator/internal/controller/services/cronjob.go
@@ -4,7 +4,7 @@ import (
 	"os"
 	"strconv"
 
-	feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1"
+	feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1"
 	"github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler"
 	appsv1 "k8s.io/api/apps/v1"
 	batchv1 "k8s.io/api/batch/v1"
@@ -272,7 +272,7 @@ func (feast *FeastServices) getCronJobRoleName() string {
 
 // defaults to a CronJob configuration that will never run. this default Job can be executed manually, however.
 // e.g. kubectl create job --from=cronjob/feast-sample feast-sample-job
-func setDefaultCronJobConfigs(feastCronJob *feastdevv1alpha1.FeastCronJob) {
+func setDefaultCronJobConfigs(feastCronJob *feastdevv1.FeastCronJob) {
 	if len(feastCronJob.Schedule) == 0 {
 		feastCronJob.Schedule = "@yearly"
 		if feastCronJob.Suspend == nil {
@@ -286,7 +286,7 @@ func setDefaultCronJobConfigs(feastCronJob *feastdevv1alpha1.FeastCronJob) {
 		}
 	}
 	if feastCronJob.ContainerConfigs == nil {
-		feastCronJob.ContainerConfigs = &feastdevv1alpha1.CronJobContainerConfigs{}
+		feastCronJob.ContainerConfigs = &feastdevv1.CronJobContainerConfigs{}
 	}
 	if feastCronJob.ContainerConfigs.Image == nil {
 		feastCronJob.ContainerConfigs.Image = getCronJobImage()
diff --git a/infra/feast-operator/internal/controller/services/repo_config.go b/infra/feast-operator/internal/controller/services/repo_config.go
index 44b295066f4..b893f031d41 100644
--- a/infra/feast-operator/internal/controller/services/repo_config.go
+++ b/infra/feast-operator/internal/controller/services/repo_config.go
@@ -22,7 +22,7 @@ import (
 	"path"
 	"strings"
 
-	feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1"
+	feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1"
 	"gopkg.in/yaml.v3"
 )
 
@@ -48,7 +48,7 @@ func (feast *FeastServices) getServiceRepoConfig() (RepoConfig, error) {
 }
 
 func getServiceRepoConfig(
-	featureStore *feastdevv1alpha1.FeatureStore,
+	featureStore *feastdevv1.FeatureStore,
 	secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error)) (RepoConfig, error) {
 	repoConfig, err := getBaseServiceRepoConfig(featureStore, secretExtractionFunc)
 	if err != nil {
@@ -82,7 +82,7 @@ func getServiceRepoConfig(
 }
 
 func getBaseServiceRepoConfig(
-	featureStore *feastdevv1alpha1.FeatureStore,
+	featureStore *feastdevv1.FeatureStore,
 	secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error)) (RepoConfig, error) {
 	repoConfig := defaultRepoConfig(featureStore)
 
@@ -116,7 +116,7 @@ func getBaseServiceRepoConfig(
 	return repoConfig, nil
 }
 
-func setRepoConfigRegistry(services *feastdevv1alpha1.FeatureStoreServices, secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error), repoConfig *RepoConfig) error {
+func setRepoConfigRegistry(services *feastdevv1.FeatureStoreServices, secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error), repoConfig *RepoConfig) error {
 	registryPersistence := services.Registry.Local.Persistence
 
 	if registryPersistence != nil {
@@ -182,7 +182,7 @@ func setRepoConfigRegistry(services *feastdevv1alpha1.FeatureStoreServices, secr
 	return nil
 }
 
-func setRepoConfigOnline(services *feastdevv1alpha1.FeatureStoreServices, secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error), repoConfig *RepoConfig) error {
+func setRepoConfigOnline(services *feastdevv1.FeatureStoreServices, secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error), repoConfig *RepoConfig) error {
 	onlineStorePersistence := services.OnlineStore.Persistence
 
 	if onlineStorePersistence != nil {
@@ -215,7 +215,7 @@ func setRepoConfigOnline(services *feastdevv1alpha1.FeatureStoreServices, secret
 	return nil
 }
 
-func setRepoConfigOffline(services *feastdevv1alpha1.FeatureStoreServices, secretExtractionFunc func(storeType string, 
secretRef string, secretKeyName string) (map[string]interface{}, error), repoConfig *RepoConfig) error { +func setRepoConfigOffline(services *feastdevv1.FeatureStoreServices, secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error), repoConfig *RepoConfig) error { repoConfig.OfflineStore = defaultOfflineStoreConfig offlineStorePersistence := services.OfflineStore.Persistence @@ -257,7 +257,7 @@ func (feast *FeastServices) getClientFeatureStoreYaml(secretExtractionFunc func( } func getClientRepoConfig( - featureStore *feastdevv1alpha1.FeatureStore, + featureStore *feastdevv1.FeatureStore, secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error), feast *FeastServices) (RepoConfig, error) { status := featureStore.Status @@ -307,7 +307,7 @@ func getClientRepoConfig( } func getRepoConfig( - featureStore *feastdevv1alpha1.FeatureStore, + featureStore *feastdevv1.FeatureStore, secretExtractionFunc func(storeType string, secretRef string, secretKeyName string) (map[string]interface{}, error)) (RepoConfig, error) { status := featureStore.Status repoConfig := initRepoConfig(status.Applied.FeastProject) @@ -340,7 +340,7 @@ func getRepoConfig( return repoConfig, nil } -func getActualPath(filePath string, pvcConfig *feastdevv1alpha1.PvcConfig) string { +func getActualPath(filePath string, pvcConfig *feastdevv1.PvcConfig) string { if pvcConfig == nil { return filePath } @@ -407,7 +407,7 @@ func (feast *FeastServices) GetDefaultRepoConfig() RepoConfig { return defaultRepoConfig(feast.Handler.FeatureStore) } -func defaultRepoConfig(featureStore *feastdevv1alpha1.FeatureStore) RepoConfig { +func defaultRepoConfig(featureStore *feastdevv1.FeatureStore) RepoConfig { repoConfig := initRepoConfig(featureStore.Status.Applied.FeastProject) repoConfig.OnlineStore = defaultOnlineStoreConfig(featureStore) repoConfig.Registry = defaultRegistryConfig(featureStore) @@ -422,19 +422,19 @@ func initRepoConfig(feastProject string) RepoConfig { return RepoConfig{ Project: feastProject, Provider: LocalProviderType, - EntityKeySerializationVersion: feastdevv1alpha1.SerializationVersion, + EntityKeySerializationVersion: feastdevv1.SerializationVersion, AuthzConfig: defaultAuthzConfig, } } -func defaultOnlineStoreConfig(featureStore *feastdevv1alpha1.FeatureStore) OnlineStoreConfig { +func defaultOnlineStoreConfig(featureStore *feastdevv1.FeatureStore) OnlineStoreConfig { return OnlineStoreConfig{ Type: OnlineSqliteConfigType, Path: defaultOnlineStorePath(featureStore), } } -func defaultRegistryConfig(featureStore *feastdevv1alpha1.FeatureStore) RegistryConfig { +func defaultRegistryConfig(featureStore *feastdevv1.FeatureStore) RegistryConfig { return RegistryConfig{ RegistryType: RegistryFileConfigType, Path: defaultRegistryPath(featureStore), diff --git a/infra/feast-operator/internal/controller/services/repo_config_test.go b/infra/feast-operator/internal/controller/services/repo_config_test.go index 72ba289900d..b31e40d7382 100644 --- a/infra/feast-operator/internal/controller/services/repo_config_test.go +++ b/infra/feast-operator/internal/controller/services/repo_config_test.go @@ -25,7 +25,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" ) var projectName = "test-project" @@ -56,11 +56,11 @@ var _ = Describe("Repo 
Config", func() { By("Having the local registry resource") featureStore = minimalFeatureStore() testPath := "/test/file.db" - featureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Persistence: &feastdevv1alpha1.RegistryPersistence{ - FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + featureStore.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Persistence: &feastdevv1.RegistryPersistence{ + FilePersistence: &feastdevv1.RegistryFilePersistence{ Path: testPath, }, }, @@ -82,10 +82,10 @@ var _ = Describe("Repo Config", func() { Expect(repoConfig.Registry).To(Equal(expectedRegistryConfig)) By("Adding an offlineStore with PVC") - featureStore.Spec.Services.OfflineStore = &feastdevv1alpha1.OfflineStore{ - Persistence: &feastdevv1alpha1.OfflineStorePersistence{ - FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ - PvcConfig: &feastdevv1alpha1.PvcConfig{ + featureStore.Spec.Services.OfflineStore = &feastdevv1.OfflineStore{ + Persistence: &feastdevv1.OfflineStorePersistence{ + FilePersistence: &feastdevv1.OfflineStoreFilePersistence{ + PvcConfig: &feastdevv1.PvcConfig{ MountPath: "/testing", }, }, @@ -105,10 +105,10 @@ var _ = Describe("Repo Config", func() { By("Having the remote registry resource") featureStore = minimalFeatureStore() - featureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Remote: &feastdevv1alpha1.RemoteRegistryConfig{ - FeastRef: &feastdevv1alpha1.FeatureStoreRef{ + featureStore.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Remote: &feastdevv1.RemoteRegistryConfig{ + FeastRef: &feastdevv1.FeatureStoreRef{ Name: "registry", }, }, @@ -124,25 +124,25 @@ var _ = Describe("Repo Config", func() { By("Having the all the file services") featureStore = minimalFeatureStore() - featureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - OfflineStore: &feastdevv1alpha1.OfflineStore{ - Persistence: &feastdevv1alpha1.OfflineStorePersistence{ - FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ + featureStore.Spec.Services = &feastdevv1.FeatureStoreServices{ + OfflineStore: &feastdevv1.OfflineStore{ + Persistence: &feastdevv1.OfflineStorePersistence{ + FilePersistence: &feastdevv1.OfflineStoreFilePersistence{ Type: "duckdb", }, }, }, - OnlineStore: &feastdevv1alpha1.OnlineStore{ - Persistence: &feastdevv1alpha1.OnlineStorePersistence{ - FilePersistence: &feastdevv1alpha1.OnlineStoreFilePersistence{ + OnlineStore: &feastdevv1.OnlineStore{ + Persistence: &feastdevv1.OnlineStorePersistence{ + FilePersistence: &feastdevv1.OnlineStoreFilePersistence{ Path: "/data/online.db", }, }, }, - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Persistence: &feastdevv1alpha1.RegistryPersistence{ - FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Persistence: &feastdevv1.RegistryPersistence{ + FilePersistence: &feastdevv1.RegistryFilePersistence{ Path: "/data/registry.db", }, }, @@ -172,14 +172,14 @@ var _ = Describe("Repo Config", func() { By("Having kubernetes authorization") featureStore = minimalFeatureStore() - featureStore.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{ - KubernetesAuthz: &feastdevv1alpha1.KubernetesAuthz{}, + 
featureStore.Spec.AuthzConfig = &feastdevv1.AuthzConfig{ + KubernetesAuthz: &feastdevv1.KubernetesAuthz{}, } - featureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - OfflineStore: &feastdevv1alpha1.OfflineStore{}, - OnlineStore: &feastdevv1alpha1.OnlineStore{}, - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{}, + featureStore.Spec.Services = &feastdevv1.FeatureStoreServices{ + OfflineStore: &feastdevv1.OfflineStore{}, + OnlineStore: &feastdevv1.OnlineStore{}, + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{}, }, } ApplyDefaultsToStatus(featureStore) @@ -196,8 +196,8 @@ var _ = Describe("Repo Config", func() { Expect(repoConfig.Registry).To(Equal(defaultRegistryConfig(featureStore))) By("Having oidc authorization") - featureStore.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{ - OidcAuthz: &feastdevv1alpha1.OidcAuthz{ + featureStore.Spec.AuthzConfig = &feastdevv1.AuthzConfig{ + OidcAuthz: &feastdevv1.OidcAuthz{ SecretRef: corev1.LocalObjectReference{ Name: "oidc-secret", }, @@ -231,10 +231,10 @@ var _ = Describe("Repo Config", func() { By("Having the all the db services") featureStore = minimalFeatureStore() - featureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - OfflineStore: &feastdevv1alpha1.OfflineStore{ - Persistence: &feastdevv1alpha1.OfflineStorePersistence{ - DBPersistence: &feastdevv1alpha1.OfflineStoreDBStorePersistence{ + featureStore.Spec.Services = &feastdevv1.FeatureStoreServices{ + OfflineStore: &feastdevv1.OfflineStore{ + Persistence: &feastdevv1.OfflineStorePersistence{ + DBPersistence: &feastdevv1.OfflineStoreDBStorePersistence{ Type: string(OfflineDBPersistenceSnowflakeConfigType), SecretRef: corev1.LocalObjectReference{ Name: "offline-test-secret", @@ -242,9 +242,9 @@ var _ = Describe("Repo Config", func() { }, }, }, - OnlineStore: &feastdevv1alpha1.OnlineStore{ - Persistence: &feastdevv1alpha1.OnlineStorePersistence{ - DBPersistence: &feastdevv1alpha1.OnlineStoreDBStorePersistence{ + OnlineStore: &feastdevv1.OnlineStore{ + Persistence: &feastdevv1.OnlineStorePersistence{ + DBPersistence: &feastdevv1.OnlineStoreDBStorePersistence{ Type: string(OnlineDBPersistenceSnowflakeConfigType), SecretRef: corev1.LocalObjectReference{ Name: "online-test-secret", @@ -252,10 +252,10 @@ var _ = Describe("Repo Config", func() { }, }, }, - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Persistence: &feastdevv1alpha1.RegistryPersistence{ - DBPersistence: &feastdevv1alpha1.RegistryDBStorePersistence{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Persistence: &feastdevv1.RegistryPersistence{ + DBPersistence: &feastdevv1.RegistryDBStorePersistence{ Type: string(RegistryDBPersistenceSnowflakeConfigType), SecretRef: corev1.LocalObjectReference{ Name: "registry-test-secret", @@ -297,8 +297,8 @@ var _ = Describe("Repo Config", func() { featureStore := minimalFeatureStore() By("Having invalid server oidc authorization") - featureStore.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{ - OidcAuthz: &feastdevv1alpha1.OidcAuthz{ + featureStore.Spec.AuthzConfig = &feastdevv1.AuthzConfig{ + OidcAuthz: &feastdevv1.OidcAuthz{ SecretRef: corev1.LocalObjectReference{ Name: "oidc-secret", }, @@ -324,8 +324,8 @@ var _ = Describe("Repo Config", func() { Expect(err).ToNot(HaveOccurred()) By("Having invalid client oidc authorization") - featureStore.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{ - OidcAuthz: &feastdevv1alpha1.OidcAuthz{ 
+ featureStore.Spec.AuthzConfig = &feastdevv1.AuthzConfig{ + OidcAuthz: &feastdevv1.OidcAuthz{ SecretRef: corev1.LocalObjectReference{ Name: "oidc-secret", }, @@ -356,28 +356,28 @@ var _ = Describe("Repo Config", func() { var emptyOfflineStoreConfig = OfflineStoreConfig{} var emptyRegistryConfig = RegistryConfig{} -func minimalFeatureStore() *feastdevv1alpha1.FeatureStore { - return &feastdevv1alpha1.FeatureStore{ +func minimalFeatureStore() *feastdevv1.FeatureStore { + return &feastdevv1.FeatureStore{ ObjectMeta: metav1.ObjectMeta{Name: "test"}, - Spec: feastdevv1alpha1.FeatureStoreSpec{ + Spec: feastdevv1.FeatureStoreSpec{ FeastProject: projectName, }, } } -func minimalFeatureStoreWithAllServers() *feastdevv1alpha1.FeatureStore { +func minimalFeatureStoreWithAllServers() *feastdevv1.FeatureStore { feast := minimalFeatureStore() // onlineStore configured by default - feast.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - OfflineStore: &feastdevv1alpha1.OfflineStore{ - Server: &feastdevv1alpha1.ServerConfigs{}, + feast.Spec.Services = &feastdevv1.FeatureStoreServices{ + OfflineStore: &feastdevv1.OfflineStore{ + Server: &feastdevv1.ServerConfigs{}, }, - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{}, + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{}, }, }, - UI: &feastdevv1alpha1.ServerConfigs{}, + UI: &feastdevv1.ServerConfigs{}, } return feast } @@ -464,50 +464,50 @@ var _ = Describe("TLS Certificate Path Configuration", func() { Context("in getClientRepoConfig", func() { It("should use individual service certificate paths when no custom CA bundle", func() { // Create a feature store with TLS enabled - featureStore := &feastdevv1alpha1.FeatureStore{ - Status: feastdevv1alpha1.FeatureStoreStatus{ - ServiceHostnames: feastdevv1alpha1.ServiceHostnames{ + featureStore := &feastdevv1.FeatureStore{ + Status: feastdevv1.FeatureStoreStatus{ + ServiceHostnames: feastdevv1.ServiceHostnames{ OfflineStore: "offline.example.com:443", OnlineStore: "online.example.com:443", Registry: "registry.example.com:443", }, - Applied: feastdevv1alpha1.FeatureStoreSpec{ - Services: &feastdevv1alpha1.FeatureStoreServices{ - OfflineStore: &feastdevv1alpha1.OfflineStore{ - Server: &feastdevv1alpha1.ServerConfigs{ - TLS: &feastdevv1alpha1.TlsConfigs{ + Applied: feastdevv1.FeatureStoreSpec{ + Services: &feastdevv1.FeatureStoreServices{ + OfflineStore: &feastdevv1.OfflineStore{ + Server: &feastdevv1.ServerConfigs{ + TLS: &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{Name: "offline-tls"}, - SecretKeyNames: feastdevv1alpha1.SecretKeyNames{ + SecretKeyNames: feastdevv1.SecretKeyNames{ TlsCrt: "tls.crt", }, }, }, }, - OnlineStore: &feastdevv1alpha1.OnlineStore{ - Server: &feastdevv1alpha1.ServerConfigs{ - TLS: &feastdevv1alpha1.TlsConfigs{ + OnlineStore: &feastdevv1.OnlineStore{ + Server: &feastdevv1.ServerConfigs{ + TLS: &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{Name: "online-tls"}, - SecretKeyNames: feastdevv1alpha1.SecretKeyNames{ + SecretKeyNames: feastdevv1.SecretKeyNames{ TlsCrt: "tls.crt", }, }, }, }, - UI: &feastdevv1alpha1.ServerConfigs{ - TLS: &feastdevv1alpha1.TlsConfigs{ + UI: &feastdevv1.ServerConfigs{ + TLS: &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{Name: "ui-tls"}, - SecretKeyNames: feastdevv1alpha1.SecretKeyNames{ + SecretKeyNames: feastdevv1.SecretKeyNames{ TlsCrt: "tls.crt", 
}, }, }, - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{ - ServerConfigs: feastdevv1alpha1.ServerConfigs{ - TLS: &feastdevv1alpha1.TlsConfigs{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{ + ServerConfigs: feastdevv1.ServerConfigs{ + TLS: &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{Name: "registry-tls"}, - SecretKeyNames: feastdevv1alpha1.SecretKeyNames{ + SecretKeyNames: feastdevv1.SecretKeyNames{ TlsCrt: "tls.crt", }, }, @@ -533,51 +533,51 @@ var _ = Describe("TLS Certificate Path Configuration", func() { It("should use custom CA bundle path when available", func() { // This test would require a full FeastServices setup with custom CA bundle // For now, we verify the function signature and basic behavior - featureStore := &feastdevv1alpha1.FeatureStore{ - Status: feastdevv1alpha1.FeatureStoreStatus{ - ServiceHostnames: feastdevv1alpha1.ServiceHostnames{ + featureStore := &feastdevv1.FeatureStore{ + Status: feastdevv1.FeatureStoreStatus{ + ServiceHostnames: feastdevv1.ServiceHostnames{ OfflineStore: "offline.example.com:443", OnlineStore: "online.example.com:443", Registry: "registry.example.com:443", UI: "ui.example.com:443", }, - Applied: feastdevv1alpha1.FeatureStoreSpec{ - Services: &feastdevv1alpha1.FeatureStoreServices{ - OfflineStore: &feastdevv1alpha1.OfflineStore{ - Server: &feastdevv1alpha1.ServerConfigs{ - TLS: &feastdevv1alpha1.TlsConfigs{ + Applied: feastdevv1.FeatureStoreSpec{ + Services: &feastdevv1.FeatureStoreServices{ + OfflineStore: &feastdevv1.OfflineStore{ + Server: &feastdevv1.ServerConfigs{ + TLS: &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{Name: "offline-tls"}, - SecretKeyNames: feastdevv1alpha1.SecretKeyNames{ + SecretKeyNames: feastdevv1.SecretKeyNames{ TlsCrt: "tls.crt", }, }, }, }, - OnlineStore: &feastdevv1alpha1.OnlineStore{ - Server: &feastdevv1alpha1.ServerConfigs{ - TLS: &feastdevv1alpha1.TlsConfigs{ + OnlineStore: &feastdevv1.OnlineStore{ + Server: &feastdevv1.ServerConfigs{ + TLS: &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{Name: "online-tls"}, - SecretKeyNames: feastdevv1alpha1.SecretKeyNames{ + SecretKeyNames: feastdevv1.SecretKeyNames{ TlsCrt: "tls.crt", }, }, }, }, - UI: &feastdevv1alpha1.ServerConfigs{ - TLS: &feastdevv1alpha1.TlsConfigs{ + UI: &feastdevv1.ServerConfigs{ + TLS: &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{Name: "ui-tls"}, - SecretKeyNames: feastdevv1alpha1.SecretKeyNames{ + SecretKeyNames: feastdevv1.SecretKeyNames{ TlsCrt: "tls.crt", }, }, }, - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{ - ServerConfigs: feastdevv1alpha1.ServerConfigs{ - TLS: &feastdevv1alpha1.TlsConfigs{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{ + ServerConfigs: feastdevv1.ServerConfigs{ + TLS: &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{Name: "registry-tls"}, - SecretKeyNames: feastdevv1alpha1.SecretKeyNames{ + SecretKeyNames: feastdevv1.SecretKeyNames{ TlsCrt: "tls.crt", }, }, diff --git a/infra/feast-operator/internal/controller/services/services.go b/infra/feast-operator/internal/controller/services/services.go index 8290420f7ff..39a48b1fe58 100644 --- a/infra/feast-operator/internal/controller/services/services.go +++ 
b/infra/feast-operator/internal/controller/services/services.go @@ -21,7 +21,7 @@ import ( "strconv" "strings" - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" routev1 "github.com/openshift/api/route/v1" "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" @@ -151,7 +151,7 @@ func (feast *FeastServices) Deploy() error { return nil } -func (feast *FeastServices) validateRegistryPersistence(registryPersistence *feastdevv1alpha1.RegistryPersistence) error { +func (feast *FeastServices) validateRegistryPersistence(registryPersistence *feastdevv1.RegistryPersistence) error { if registryPersistence != nil { dbPersistence := registryPersistence.DBPersistence @@ -172,7 +172,7 @@ func (feast *FeastServices) validateRegistryPersistence(registryPersistence *fea return nil } -func (feast *FeastServices) validateOnlineStorePersistence(onlinePersistence *feastdevv1alpha1.OnlineStorePersistence) error { +func (feast *FeastServices) validateOnlineStorePersistence(onlinePersistence *feastdevv1.OnlineStorePersistence) error { if onlinePersistence != nil { dbPersistence := onlinePersistence.DBPersistence @@ -193,7 +193,7 @@ func (feast *FeastServices) validateOnlineStorePersistence(onlinePersistence *fe return nil } -func (feast *FeastServices) validateOfflineStorePersistence(offlinePersistence *feastdevv1alpha1.OfflineStorePersistence) error { +func (feast *FeastServices) validateOfflineStorePersistence(offlinePersistence *feastdevv1.OfflineStorePersistence) error { if offlinePersistence != nil { filePersistence := offlinePersistence.FilePersistence dbPersistence := offlinePersistence.DBPersistence @@ -359,7 +359,7 @@ func (feast *FeastServices) createRoute(feastType FeastServiceType) error { return nil } -func (feast *FeastServices) createPVC(pvcCreate *feastdevv1alpha1.PvcCreate, feastType FeastServiceType) error { +func (feast *FeastServices) createPVC(pvcCreate *feastdevv1.PvcCreate, feastType FeastServiceType) error { logger := log.FromContext(feast.Handler.Context) pvc, err := feast.createNewPVC(pvcCreate, feastType) if err != nil { @@ -501,7 +501,7 @@ func (feast *FeastServices) setContainer(containers *[]corev1.Container, feastTy } } -func getContainer(name, workingDir string, cmd []string, containerConfigs feastdevv1alpha1.ContainerConfigs, fsYamlB64 string) *corev1.Container { +func getContainer(name, workingDir string, cmd []string, containerConfigs feastdevv1.ContainerConfigs, fsYamlB64 string) *corev1.Container { container := &corev1.Container{ Name: name, Command: cmd, @@ -779,7 +779,7 @@ func (feast *FeastServices) setServiceAccount(sa *corev1.ServiceAccount) error { return controllerutil.SetControllerReference(feast.Handler.FeatureStore, sa, feast.Handler.Scheme) } -func (feast *FeastServices) createNewPVC(pvcCreate *feastdevv1alpha1.PvcCreate, feastType FeastServiceType) (*corev1.PersistentVolumeClaim, error) { +func (feast *FeastServices) createNewPVC(pvcCreate *feastdevv1.PvcCreate, feastType FeastServiceType) (*corev1.PersistentVolumeClaim, error) { pvc := feast.initPVC(feastType) pvc.Spec = corev1.PersistentVolumeClaimSpec{ @@ -792,7 +792,7 @@ func (feast *FeastServices) createNewPVC(pvcCreate *feastdevv1alpha1.PvcCreate, return pvc, controllerutil.SetControllerReference(feast.Handler.FeatureStore, pvc, feast.Handler.Scheme) } -func (feast *FeastServices) getServerConfigs(feastType FeastServiceType) *feastdevv1alpha1.ServerConfigs { +func (feast *FeastServices) 
getServerConfigs(feastType FeastServiceType) *feastdevv1.ServerConfigs { appliedServices := feast.Handler.FeatureStore.Status.Applied.Services switch feastType { case OfflineFeastType: @@ -904,11 +904,11 @@ func (feast *FeastServices) GetDeployment() (appsv1.Deployment, error) { } // GetFeastServiceName returns the feast service object name based on service type -func GetFeastServiceName(featureStore *feastdevv1alpha1.FeatureStore, feastType FeastServiceType) string { +func GetFeastServiceName(featureStore *feastdevv1.FeatureStore, feastType FeastServiceType) string { return GetFeastName(featureStore) + "-" + string(feastType) } -func GetFeastName(featureStore *feastdevv1alpha1.FeatureStore) string { +func GetFeastName(featureStore *feastdevv1.FeatureStore) string { return handler.FeastPrefix + featureStore.Name } @@ -925,7 +925,7 @@ func (feast *FeastServices) getLabels() map[string]string { } func (feast *FeastServices) setServiceHostnames() error { - feast.Handler.FeatureStore.Status.ServiceHostnames = feastdevv1alpha1.ServiceHostnames{} + feast.Handler.FeatureStore.Status.ServiceHostnames = feastdevv1.ServiceHostnames{} domain := svcDomain + ":" if feast.isOfflineServer() { objMeta := feast.initFeastSvc(OfflineFeastType) @@ -984,7 +984,7 @@ func (feast *FeastServices) setRemoteRegistryURL() error { // referenced/remote registry must use the local registry server option and be in a 'Ready' state. if remoteFeast != nil && remoteFeast.isRegistryServer() && - apimeta.IsStatusConditionTrue(remoteFeast.Handler.FeatureStore.Status.Conditions, feastdevv1alpha1.RegistryReadyType) && + apimeta.IsStatusConditionTrue(remoteFeast.Handler.FeatureStore.Status.Conditions, feastdevv1.RegistryReadyType) && len(remoteFeast.Handler.FeatureStore.Status.ServiceHostnames.Registry) > 0 { // Check if gRPC server is enabled if !remoteFeast.isRegistryGrpcEnabled() { @@ -1006,7 +1006,7 @@ func (feast *FeastServices) getRemoteRegistryFeastHandler() (*FeastServices, err if nsName == crNsName { return nil, errors.New("FeatureStore '" + crNsName.Name + "' can't reference itself in `spec.services.registry.remote.feastRef`") } - remoteFeastObj := &feastdevv1alpha1.FeatureStore{} + remoteFeastObj := &feastdevv1.FeatureStore{} if err := feast.Handler.Client.Get(feast.Handler.Context, nsName, remoteFeastObj); err != nil { if apierrors.IsNotFound(err) { return nil, errors.New("Referenced FeatureStore '" + feastRemoteRef.Name + "' was not found") @@ -1131,7 +1131,7 @@ func (feast *FeastServices) initRoute(feastType FeastServiceType) *routev1.Route return route } -func applyCtrConfigs(container *corev1.Container, containerConfigs feastdevv1alpha1.ContainerConfigs) { +func applyCtrConfigs(container *corev1.Container, containerConfigs feastdevv1.ContainerConfigs) { if containerConfigs.DefaultCtrConfigs.Image != nil { container.Image = *containerConfigs.DefaultCtrConfigs.Image } @@ -1158,7 +1158,7 @@ func (feast *FeastServices) mountPvcConfigs(podSpec *corev1.PodSpec) { } } -func (feast *FeastServices) mountPvcConfig(podSpec *corev1.PodSpec, pvcConfig *feastdevv1alpha1.PvcConfig, feastType FeastServiceType) { +func (feast *FeastServices) mountPvcConfig(podSpec *corev1.PodSpec, pvcConfig *feastdevv1.PvcConfig, feastType FeastServiceType) { if podSpec != nil && pvcConfig != nil { volName := feast.initPVC(feastType).Name pvcName := volName @@ -1229,21 +1229,21 @@ func mountEmptyDirVolume(podSpec *corev1.PodSpec) { } } -func getTargetPort(feastType FeastServiceType, tls *feastdevv1alpha1.TlsConfigs) int32 { +func 
getTargetPort(feastType FeastServiceType, tls *feastdevv1.TlsConfigs) int32 { if tls.IsTLS() { return FeastServiceConstants[feastType].TargetHttpsPort } return FeastServiceConstants[feastType].TargetHttpPort } -func getTargetRestPort(feastType FeastServiceType, tls *feastdevv1alpha1.TlsConfigs) int32 { +func getTargetRestPort(feastType FeastServiceType, tls *feastdevv1.TlsConfigs) int32 { if tls.IsTLS() { return FeastServiceConstants[feastType].TargetRestHttpsPort } return FeastServiceConstants[feastType].TargetRestHttpPort } -func (feast *FeastServices) getProbeHandler(feastType FeastServiceType, tls *feastdevv1alpha1.TlsConfigs) corev1.ProbeHandler { +func (feast *FeastServices) getProbeHandler(feastType FeastServiceType, tls *feastdevv1.TlsConfigs) corev1.ProbeHandler { targetPort := getTargetPort(feastType, tls) if feastType == RegistryFeastType { diff --git a/infra/feast-operator/internal/controller/services/services_test.go b/infra/feast-operator/internal/controller/services/services_test.go index c7d2eade3dc..e9634f7f07d 100644 --- a/infra/feast-operator/internal/controller/services/services_test.go +++ b/infra/feast-operator/internal/controller/services/services_test.go @@ -19,7 +19,7 @@ package services import ( "context" - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -34,12 +34,12 @@ func ptr[T any](v T) *T { } func (feast *FeastServices) refreshFeatureStore(ctx context.Context, key types.NamespacedName) { - fs := &feastdevv1alpha1.FeatureStore{} + fs := &feastdevv1.FeatureStore{} Expect(k8sClient.Get(ctx, key, fs)).To(Succeed()) feast.Handler.FeatureStore = fs } -func applySpecToStatus(fs *feastdevv1alpha1.FeatureStore) { +func applySpecToStatus(fs *feastdevv1.FeatureStore) { fs.Status.Applied.Services = fs.Spec.Services.DeepCopy() fs.Status.Applied.FeastProject = fs.Spec.FeastProject Expect(k8sClient.Status().Update(context.Background(), fs)).To(Succeed()) @@ -47,7 +47,7 @@ func applySpecToStatus(fs *feastdevv1alpha1.FeatureStore) { var _ = Describe("Registry Service", func() { var ( - featureStore *feastdevv1alpha1.FeatureStore + featureStore *feastdevv1.FeatureStore feast *FeastServices typeNamespacedName types.NamespacedName ctx context.Context @@ -69,20 +69,20 @@ var _ = Describe("Registry Service", func() { Namespace: "default", } - featureStore = &feastdevv1alpha1.FeatureStore{ + featureStore = &feastdevv1.FeatureStore{ ObjectMeta: metav1.ObjectMeta{ Name: typeNamespacedName.Name, Namespace: typeNamespacedName.Namespace, }, - Spec: feastdevv1alpha1.FeatureStoreSpec{ + Spec: feastdevv1.FeatureStoreSpec{ FeastProject: "testproject", - Services: &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{ - ServerConfigs: feastdevv1alpha1.ServerConfigs{ - ContainerConfigs: feastdevv1alpha1.ContainerConfigs{ - DefaultCtrConfigs: feastdevv1alpha1.DefaultCtrConfigs{ + Services: &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{ + ServerConfigs: feastdevv1.ServerConfigs{ + ContainerConfigs: feastdevv1.ContainerConfigs{ + DefaultCtrConfigs: feastdevv1.DefaultCtrConfigs{ Image: ptr("test-image"), }, }, @@ -244,13 +244,13 @@ 
var _ = Describe("Registry Service", func() { "node-type": "online", "zone": "us-west-1a", } - featureStore.Spec.Services.OnlineStore = &feastdevv1alpha1.OnlineStore{ - Server: &feastdevv1alpha1.ServerConfigs{ - ContainerConfigs: feastdevv1alpha1.ContainerConfigs{ - DefaultCtrConfigs: feastdevv1alpha1.DefaultCtrConfigs{ + featureStore.Spec.Services.OnlineStore = &feastdevv1.OnlineStore{ + Server: &feastdevv1.ServerConfigs{ + ContainerConfigs: feastdevv1.ContainerConfigs{ + DefaultCtrConfigs: feastdevv1.DefaultCtrConfigs{ Image: ptr("test-image"), }, - OptionalCtrConfigs: feastdevv1alpha1.OptionalCtrConfigs{ + OptionalCtrConfigs: feastdevv1.OptionalCtrConfigs{ NodeSelector: &onlineNodeSelector, }, }, @@ -281,12 +281,12 @@ var _ = Describe("Registry Service", func() { uiNodeSelector := map[string]string{ "node-type": "ui", } - featureStore.Spec.Services.UI = &feastdevv1alpha1.ServerConfigs{ - ContainerConfigs: feastdevv1alpha1.ContainerConfigs{ - DefaultCtrConfigs: feastdevv1alpha1.DefaultCtrConfigs{ + featureStore.Spec.Services.UI = &feastdevv1.ServerConfigs{ + ContainerConfigs: feastdevv1.ContainerConfigs{ + DefaultCtrConfigs: feastdevv1.DefaultCtrConfigs{ Image: ptr("test-image"), }, - OptionalCtrConfigs: feastdevv1alpha1.OptionalCtrConfigs{ + OptionalCtrConfigs: feastdevv1.OptionalCtrConfigs{ NodeSelector: &uiNodeSelector, }, }, @@ -328,13 +328,13 @@ var _ = Describe("Registry Service", func() { onlineNodeSelector := map[string]string{ "node-type": "online", } - featureStore.Spec.Services.OnlineStore = &feastdevv1alpha1.OnlineStore{ - Server: &feastdevv1alpha1.ServerConfigs{ - ContainerConfigs: feastdevv1alpha1.ContainerConfigs{ - DefaultCtrConfigs: feastdevv1alpha1.DefaultCtrConfigs{ + featureStore.Spec.Services.OnlineStore = &feastdevv1.OnlineStore{ + Server: &feastdevv1.ServerConfigs{ + ContainerConfigs: feastdevv1.ContainerConfigs{ + DefaultCtrConfigs: feastdevv1.DefaultCtrConfigs{ Image: ptr("test-image"), }, - OptionalCtrConfigs: feastdevv1alpha1.OptionalCtrConfigs{ + OptionalCtrConfigs: feastdevv1.OptionalCtrConfigs{ NodeSelector: &onlineNodeSelector, }, }, @@ -346,12 +346,12 @@ var _ = Describe("Registry Service", func() { "node-type": "ui", "zone": "us-east-1", } - featureStore.Spec.Services.UI = &feastdevv1alpha1.ServerConfigs{ - ContainerConfigs: feastdevv1alpha1.ContainerConfigs{ - DefaultCtrConfigs: feastdevv1alpha1.DefaultCtrConfigs{ + featureStore.Spec.Services.UI = &feastdevv1.ServerConfigs{ + ContainerConfigs: feastdevv1.ContainerConfigs{ + DefaultCtrConfigs: feastdevv1.DefaultCtrConfigs{ Image: ptr("test-image"), }, - OptionalCtrConfigs: feastdevv1alpha1.OptionalCtrConfigs{ + OptionalCtrConfigs: feastdevv1.OptionalCtrConfigs{ NodeSelector: &uiNodeSelector, }, }, @@ -376,8 +376,8 @@ var _ = Describe("Registry Service", func() { }) It("should enable metrics on the online service when configured", func() { - featureStore.Spec.Services.OnlineStore = &feastdevv1alpha1.OnlineStore{ - Server: &feastdevv1alpha1.ServerConfigs{Metrics: ptr(true)}, + featureStore.Spec.Services.OnlineStore = &feastdevv1.OnlineStore{ + Server: &feastdevv1.ServerConfigs{Metrics: ptr(true)}, } Expect(k8sClient.Update(ctx, featureStore)).To(Succeed()) diff --git a/infra/feast-operator/internal/controller/services/services_types.go b/infra/feast-operator/internal/controller/services/services_types.go index b0f99756fa7..5997307ab1c 100644 --- a/infra/feast-operator/internal/controller/services/services_types.go +++ b/infra/feast-operator/internal/controller/services/services_types.go @@ -18,7 +18,7 @@ 
package services import ( "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" handler "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -99,8 +99,8 @@ var ( DefaultImage = "quay.io/feastdev/feature-server:" + feastversion.FeastVersion DefaultCronJobImage = "quay.io/openshift/origin-cli:4.17" DefaultPVCAccessModes = []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce} - NameLabelKey = feastdevv1alpha1.GroupVersion.Group + "/name" - ServiceTypeLabelKey = feastdevv1alpha1.GroupVersion.Group + "/service-type" + NameLabelKey = feastdevv1.GroupVersion.Group + "/name" + ServiceTypeLabelKey = feastdevv1.GroupVersion.Group + "/service-type" FeastServiceConstants = map[FeastServiceType]deploymentSettings{ OfflineFeastType: { @@ -130,80 +130,80 @@ var ( FeastServiceConditions = map[FeastServiceType]map[metav1.ConditionStatus]metav1.Condition{ OfflineFeastType: { metav1.ConditionTrue: { - Type: feastdevv1alpha1.OfflineStoreReadyType, + Type: feastdevv1.OfflineStoreReadyType, Status: metav1.ConditionTrue, - Reason: feastdevv1alpha1.ReadyReason, - Message: feastdevv1alpha1.OfflineStoreReadyMessage, + Reason: feastdevv1.ReadyReason, + Message: feastdevv1.OfflineStoreReadyMessage, }, metav1.ConditionFalse: { - Type: feastdevv1alpha1.OfflineStoreReadyType, + Type: feastdevv1.OfflineStoreReadyType, Status: metav1.ConditionFalse, - Reason: feastdevv1alpha1.OfflineStoreFailedReason, + Reason: feastdevv1.OfflineStoreFailedReason, }, }, OnlineFeastType: { metav1.ConditionTrue: { - Type: feastdevv1alpha1.OnlineStoreReadyType, + Type: feastdevv1.OnlineStoreReadyType, Status: metav1.ConditionTrue, - Reason: feastdevv1alpha1.ReadyReason, - Message: feastdevv1alpha1.OnlineStoreReadyMessage, + Reason: feastdevv1.ReadyReason, + Message: feastdevv1.OnlineStoreReadyMessage, }, metav1.ConditionFalse: { - Type: feastdevv1alpha1.OnlineStoreReadyType, + Type: feastdevv1.OnlineStoreReadyType, Status: metav1.ConditionFalse, - Reason: feastdevv1alpha1.OnlineStoreFailedReason, + Reason: feastdevv1.OnlineStoreFailedReason, }, }, RegistryFeastType: { metav1.ConditionTrue: { - Type: feastdevv1alpha1.RegistryReadyType, + Type: feastdevv1.RegistryReadyType, Status: metav1.ConditionTrue, - Reason: feastdevv1alpha1.ReadyReason, - Message: feastdevv1alpha1.RegistryReadyMessage, + Reason: feastdevv1.ReadyReason, + Message: feastdevv1.RegistryReadyMessage, }, metav1.ConditionFalse: { - Type: feastdevv1alpha1.RegistryReadyType, + Type: feastdevv1.RegistryReadyType, Status: metav1.ConditionFalse, - Reason: feastdevv1alpha1.RegistryFailedReason, + Reason: feastdevv1.RegistryFailedReason, }, }, UIFeastType: { metav1.ConditionTrue: { - Type: feastdevv1alpha1.UIReadyType, + Type: feastdevv1.UIReadyType, Status: metav1.ConditionTrue, - Reason: feastdevv1alpha1.ReadyReason, - Message: feastdevv1alpha1.UIReadyMessage, + Reason: feastdevv1.ReadyReason, + Message: feastdevv1.UIReadyMessage, }, metav1.ConditionFalse: { - Type: feastdevv1alpha1.UIReadyType, + Type: feastdevv1.UIReadyType, Status: metav1.ConditionFalse, - Reason: feastdevv1alpha1.UIFailedReason, + Reason: feastdevv1.UIFailedReason, }, }, ClientFeastType: { metav1.ConditionTrue: { - Type: feastdevv1alpha1.ClientReadyType, + Type: feastdevv1.ClientReadyType, Status: metav1.ConditionTrue, - Reason: 
feastdevv1alpha1.ReadyReason, - Message: feastdevv1alpha1.ClientReadyMessage, + Reason: feastdevv1.ReadyReason, + Message: feastdevv1.ClientReadyMessage, }, metav1.ConditionFalse: { - Type: feastdevv1alpha1.ClientReadyType, + Type: feastdevv1.ClientReadyType, Status: metav1.ConditionFalse, - Reason: feastdevv1alpha1.ClientFailedReason, + Reason: feastdevv1.ClientFailedReason, }, }, CronJobFeastType: { metav1.ConditionTrue: { - Type: feastdevv1alpha1.CronJobReadyType, + Type: feastdevv1.CronJobReadyType, Status: metav1.ConditionTrue, - Reason: feastdevv1alpha1.ReadyReason, - Message: feastdevv1alpha1.CronJobReadyMessage, + Reason: feastdevv1.ReadyReason, + Message: feastdevv1.CronJobReadyMessage, }, metav1.ConditionFalse: { - Type: feastdevv1alpha1.CronJobReadyType, + Type: feastdevv1.CronJobReadyType, Status: metav1.ConditionFalse, - Reason: feastdevv1alpha1.CronJobFailedReason, + Reason: feastdevv1.CronJobFailedReason, }, }, } diff --git a/infra/feast-operator/internal/controller/services/suite_test.go b/infra/feast-operator/internal/controller/services/suite_test.go index 5e922bc7e4a..a3d5bb3dae8 100644 --- a/infra/feast-operator/internal/controller/services/suite_test.go +++ b/infra/feast-operator/internal/controller/services/suite_test.go @@ -31,7 +31,7 @@ import ( logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" // +kubebuilder:scaffold:imports ) @@ -68,7 +68,7 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) - err = feastdevv1alpha1.AddToScheme(scheme.Scheme) + err = feastdevv1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) // +kubebuilder:scaffold:scheme diff --git a/infra/feast-operator/internal/controller/services/tls.go b/infra/feast-operator/internal/controller/services/tls.go index 4b209e64e8c..4a50697c5a1 100644 --- a/infra/feast-operator/internal/controller/services/tls.go +++ b/infra/feast-operator/internal/controller/services/tls.go @@ -23,7 +23,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" corev1 "k8s.io/api/core/v1" ) @@ -50,21 +50,21 @@ func (feast *FeastServices) setTlsDefaults() error { func (feast *FeastServices) setOpenshiftTls() error { appliedServices := feast.Handler.FeatureStore.Status.Applied.Services if feast.offlineOpenshiftTls() { - appliedServices.OfflineStore.Server.TLS = &feastdevv1alpha1.TlsConfigs{ + appliedServices.OfflineStore.Server.TLS = &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{ Name: feast.initFeastSvc(OfflineFeastType).Name + tlsNameSuffix, }, } } if feast.onlineOpenshiftTls() { - appliedServices.OnlineStore.Server.TLS = &feastdevv1alpha1.TlsConfigs{ + appliedServices.OnlineStore.Server.TLS = &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{ Name: feast.initFeastSvc(OnlineFeastType).Name + tlsNameSuffix, }, } } if feast.uiOpenshiftTls() { - appliedServices.UI.TLS = &feastdevv1alpha1.TlsConfigs{ + appliedServices.UI.TLS = &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{ Name: feast.initFeastSvc(UIFeastType).Name + tlsNameSuffix, }, @@ -77,21 +77,21 @@ func (feast *FeastServices) setOpenshiftTls() error { if grpcEnabled && restEnabled { // Both services enabled: Use gRPC service name as 
primary certificate // The certificate will include both hostnames as SANs via service annotations - appliedServices.Registry.Local.Server.TLS = &feastdevv1alpha1.TlsConfigs{ + appliedServices.Registry.Local.Server.TLS = &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{ Name: feast.initFeastSvc(RegistryFeastType).Name + tlsNameSuffix, }, } } else if grpcEnabled && !restEnabled { // Only gRPC enabled: Use gRPC service name - appliedServices.Registry.Local.Server.TLS = &feastdevv1alpha1.TlsConfigs{ + appliedServices.Registry.Local.Server.TLS = &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{ Name: feast.initFeastSvc(RegistryFeastType).Name + tlsNameSuffix, }, } } else if !grpcEnabled && restEnabled { // Only REST enabled: Use REST service name - appliedServices.Registry.Local.Server.TLS = &feastdevv1alpha1.TlsConfigs{ + appliedServices.Registry.Local.Server.TLS = &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{ Name: feast.initFeastRestSvc(RegistryFeastType).Name + tlsNameSuffix, }, @@ -100,7 +100,7 @@ func (feast *FeastServices) setOpenshiftTls() error { } else if remote, err := feast.remoteRegistryOpenshiftTls(); remote { // if the remote registry reference is using openshift's service serving certificates, we can use the injected service CA bundle configMap if appliedServices.Registry.Remote.TLS == nil { - appliedServices.Registry.Remote.TLS = &feastdevv1alpha1.TlsRemoteRegistryConfigs{ + appliedServices.Registry.Remote.TLS = &feastdevv1.TlsRemoteRegistryConfigs{ ConfigMapRef: corev1.LocalObjectReference{ Name: feast.initCaConfigMap().Name, }, @@ -135,7 +135,7 @@ func (feast *FeastServices) isOpenShiftTls(feastType FeastServiceType) (isOpenSh return } -func (feast *FeastServices) getTlsConfigs(feastType FeastServiceType) *feastdevv1alpha1.TlsConfigs { +func (feast *FeastServices) getTlsConfigs(feastType FeastServiceType) *feastdevv1.TlsConfigs { if serviceConfigs := feast.getServerConfigs(feastType); serviceConfigs != nil { return serviceConfigs.TLS } @@ -234,7 +234,7 @@ func (feast *FeastServices) mountTlsConfig(feastType FeastServiceType, podSpec * } } -func mountTlsRemoteRegistryConfig(podSpec *corev1.PodSpec, tls *feastdevv1alpha1.TlsRemoteRegistryConfigs) { +func mountTlsRemoteRegistryConfig(podSpec *corev1.PodSpec, tls *feastdevv1.TlsRemoteRegistryConfigs) { if tls != nil { volName := string(RegistryFeastType) + tlsNameSuffix podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{ @@ -312,14 +312,14 @@ func (feast *FeastServices) GetCustomCertificatesBundle() CustomCertificatesBund return customCertificatesBundle } -func getPortStr(tls *feastdevv1alpha1.TlsConfigs) string { +func getPortStr(tls *feastdevv1.TlsConfigs) string { if tls.IsTLS() { return strconv.Itoa(HttpsPort) } return strconv.Itoa(HttpPort) } -func tlsDefaults(tls *feastdevv1alpha1.TlsConfigs) { +func tlsDefaults(tls *feastdevv1.TlsConfigs) { if tls.IsTLS() { if len(tls.SecretKeyNames.TlsCrt) == 0 { tls.SecretKeyNames.TlsCrt = "tls.crt" @@ -330,11 +330,11 @@ func tlsDefaults(tls *feastdevv1alpha1.TlsConfigs) { } } -func localRegistryTls(featureStore *feastdevv1alpha1.FeatureStore) bool { +func localRegistryTls(featureStore *feastdevv1.FeatureStore) bool { return IsRegistryServer(featureStore) && featureStore.Status.Applied.Services.Registry.Local.Server.TLS.IsTLS() } -func remoteRegistryTls(featureStore *feastdevv1alpha1.FeatureStore) bool { +func remoteRegistryTls(featureStore *feastdevv1.FeatureStore) bool { return isRemoteRegistry(featureStore) && 
featureStore.Status.Applied.Services.Registry.Remote.TLS != nil } diff --git a/infra/feast-operator/internal/controller/services/tls_test.go b/infra/feast-operator/internal/controller/services/tls_test.go index aa29fe97548..e5299d79119 100644 --- a/infra/feast-operator/internal/controller/services/tls_test.go +++ b/infra/feast-operator/internal/controller/services/tls_test.go @@ -19,7 +19,7 @@ package services import ( "context" - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" "github.com/feast-dev/feast/infra/feast-operator/internal/controller/handler" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -34,9 +34,9 @@ var _ = Describe("TLS Config", func() { Context("When reconciling a FeatureStore", func() { scheme := runtime.NewScheme() utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(feastdevv1alpha1.AddToScheme(scheme)) + utilruntime.Must(feastdevv1.AddToScheme(scheme)) - secretKeyNames := feastdevv1alpha1.SecretKeyNames{ + secretKeyNames := feastdevv1.SecretKeyNames{ TlsCrt: "tls.crt", TlsKey: "tls.key", } @@ -53,11 +53,11 @@ var _ = Describe("TLS Config", func() { FeatureStore: minimalFeatureStore(), }, } - feast.Handler.FeatureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{ - ServerConfigs: feastdevv1alpha1.ServerConfigs{}, + feast.Handler.FeatureStore.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{ + ServerConfigs: feastdevv1.ServerConfigs{}, }, }, }, @@ -84,11 +84,11 @@ var _ = Describe("TLS Config", func() { // registry service w/ openshift tls testSetIsOpenShift() feast.Handler.FeatureStore = minimalFeatureStore() - feast.Handler.FeatureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{ - ServerConfigs: feastdevv1alpha1.ServerConfigs{}, + feast.Handler.FeatureStore.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{ + ServerConfigs: feastdevv1.ServerConfigs{}, }, }, }, @@ -183,22 +183,22 @@ var _ = Describe("TLS Config", func() { // registry service w/ tls and in an openshift cluster feast.Handler.FeatureStore = minimalFeatureStore() - feast.Handler.FeatureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - OnlineStore: &feastdevv1alpha1.OnlineStore{ - Server: &feastdevv1alpha1.ServerConfigs{ - TLS: &feastdevv1alpha1.TlsConfigs{}, + feast.Handler.FeatureStore.Spec.Services = &feastdevv1.FeatureStoreServices{ + OnlineStore: &feastdevv1.OnlineStore{ + Server: &feastdevv1.ServerConfigs{ + TLS: &feastdevv1.TlsConfigs{}, }, }, - UI: &feastdevv1alpha1.ServerConfigs{ - TLS: &feastdevv1alpha1.TlsConfigs{}, + UI: &feastdevv1.ServerConfigs{ + TLS: &feastdevv1.TlsConfigs{}, }, - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{ - ServerConfigs: feastdevv1alpha1.ServerConfigs{ - TLS: &feastdevv1alpha1.TlsConfigs{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: 
&feastdevv1.RegistryServerConfigs{ + ServerConfigs: feastdevv1.ServerConfigs{ + TLS: &feastdevv1.TlsConfigs{ SecretRef: &corev1.LocalObjectReference{}, - SecretKeyNames: feastdevv1alpha1.SecretKeyNames{ + SecretKeyNames: feastdevv1.SecretKeyNames{ TlsCrt: "test.crt", }, }, @@ -238,21 +238,21 @@ var _ = Describe("TLS Config", func() { // all services w/ tls and in an openshift cluster feast.Handler.FeatureStore = minimalFeatureStoreWithAllServers() disable := true - feast.Handler.FeatureStore.Spec.Services.OnlineStore = &feastdevv1alpha1.OnlineStore{ - Server: &feastdevv1alpha1.ServerConfigs{ - TLS: &feastdevv1alpha1.TlsConfigs{ + feast.Handler.FeatureStore.Spec.Services.OnlineStore = &feastdevv1.OnlineStore{ + Server: &feastdevv1.ServerConfigs{ + TLS: &feastdevv1.TlsConfigs{ Disable: &disable, }, }, } - feast.Handler.FeatureStore.Spec.Services.UI.TLS = &feastdevv1alpha1.TlsConfigs{ + feast.Handler.FeatureStore.Spec.Services.UI.TLS = &feastdevv1.TlsConfigs{ Disable: &disable, } - feast.Handler.FeatureStore.Spec.Services.Registry = &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{ - ServerConfigs: feastdevv1alpha1.ServerConfigs{ - TLS: &feastdevv1alpha1.TlsConfigs{ + feast.Handler.FeatureStore.Spec.Services.Registry = &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{ + ServerConfigs: feastdevv1.ServerConfigs{ + TLS: &feastdevv1.TlsConfigs{ Disable: &disable, }, }, @@ -340,11 +340,11 @@ var _ = Describe("TLS Config", func() { feast.Handler.FeatureStore = minimalFeatureStore() restEnabled := true grpcEnabled := false - feast.Handler.FeatureStore.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{ - ServerConfigs: feastdevv1alpha1.ServerConfigs{}, + feast.Handler.FeatureStore.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{ + ServerConfigs: feastdevv1.ServerConfigs{}, RestAPI: &restEnabled, GRPC: &grpcEnabled, }, diff --git a/infra/feast-operator/internal/controller/services/util.go b/infra/feast-operator/internal/controller/services/util.go index 662308056e2..8e8a717aecf 100644 --- a/infra/feast-operator/internal/controller/services/util.go +++ b/infra/feast-operator/internal/controller/services/util.go @@ -8,7 +8,7 @@ import ( "strings" "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -22,22 +22,22 @@ import ( var isOpenShift = false -func IsRegistryServer(featureStore *feastdevv1alpha1.FeatureStore) bool { +func IsRegistryServer(featureStore *feastdevv1.FeatureStore) bool { return IsLocalRegistry(featureStore) && featureStore.Status.Applied.Services.Registry.Local.Server != nil } -func IsLocalRegistry(featureStore *feastdevv1alpha1.FeatureStore) bool { +func IsLocalRegistry(featureStore *feastdevv1.FeatureStore) bool { appliedServices := featureStore.Status.Applied.Services return appliedServices != nil && appliedServices.Registry != nil && appliedServices.Registry.Local != nil } -func isRemoteRegistry(featureStore 
*feastdevv1alpha1.FeatureStore) bool { +func isRemoteRegistry(featureStore *feastdevv1.FeatureStore) bool { appliedServices := featureStore.Status.Applied.Services return appliedServices != nil && appliedServices.Registry != nil && appliedServices.Registry.Remote != nil } -func hasPvcConfig(featureStore *feastdevv1alpha1.FeatureStore, feastType FeastServiceType) (*feastdevv1alpha1.PvcConfig, bool) { - var pvcConfig *feastdevv1alpha1.PvcConfig +func hasPvcConfig(featureStore *feastdevv1.FeatureStore, feastType FeastServiceType) (*feastdevv1.PvcConfig, bool) { + var pvcConfig *feastdevv1.PvcConfig services := featureStore.Status.Applied.Services if services != nil { switch feastType { @@ -61,14 +61,14 @@ func hasPvcConfig(featureStore *feastdevv1alpha1.FeatureStore, feastType FeastSe return pvcConfig, pvcConfig != nil } -func shouldCreatePvc(featureStore *feastdevv1alpha1.FeatureStore, feastType FeastServiceType) (*feastdevv1alpha1.PvcCreate, bool) { +func shouldCreatePvc(featureStore *feastdevv1.FeatureStore, feastType FeastServiceType) (*feastdevv1.PvcCreate, bool) { if pvcConfig, ok := hasPvcConfig(featureStore, feastType); ok { return pvcConfig.Create, pvcConfig.Create != nil } return nil, false } -func shouldMountEmptyDir(featureStore *feastdevv1alpha1.FeatureStore) bool { +func shouldMountEmptyDir(featureStore *feastdevv1.FeatureStore) bool { for _, feastType := range feastServerTypes { if _, ok := hasPvcConfig(featureStore, feastType); !ok { return true @@ -77,26 +77,26 @@ func shouldMountEmptyDir(featureStore *feastdevv1alpha1.FeatureStore) bool { return false } -func getOfflineMountPath(featureStore *feastdevv1alpha1.FeatureStore) string { +func getOfflineMountPath(featureStore *feastdevv1.FeatureStore) string { if pvcConfig, ok := hasPvcConfig(featureStore, OfflineFeastType); ok { return pvcConfig.MountPath } return EphemeralPath } -func ApplyDefaultsToStatus(cr *feastdevv1alpha1.FeatureStore) { +func ApplyDefaultsToStatus(cr *feastdevv1.FeatureStore) { // overwrite status.applied with every reconcile cr.Spec.DeepCopyInto(&cr.Status.Applied) cr.Status.FeastVersion = feastversion.FeastVersion applied := &cr.Status.Applied if applied.FeastProjectDir == nil { - applied.FeastProjectDir = &feastdevv1alpha1.FeastProjectDir{ - Init: &feastdevv1alpha1.FeastInitOptions{}, + applied.FeastProjectDir = &feastdevv1.FeastProjectDir{ + Init: &feastdevv1.FeastInitOptions{}, } } if applied.Services == nil { - applied.Services = &feastdevv1alpha1.FeatureStoreServices{} + applied.Services = &feastdevv1.FeatureStoreServices{} } services := applied.Services @@ -105,15 +105,15 @@ func ApplyDefaultsToStatus(cr *feastdevv1alpha1.FeatureStore) { if services.Registry.Remote == nil { // if local registry not set, apply an empty pointer struct if services.Registry.Local == nil { - services.Registry.Local = &feastdevv1alpha1.LocalRegistryConfig{} + services.Registry.Local = &feastdevv1.LocalRegistryConfig{} } if services.Registry.Local.Persistence == nil { - services.Registry.Local.Persistence = &feastdevv1alpha1.RegistryPersistence{} + services.Registry.Local.Persistence = &feastdevv1.RegistryPersistence{} } if services.Registry.Local.Persistence.DBPersistence == nil { if services.Registry.Local.Persistence.FilePersistence == nil { - services.Registry.Local.Persistence.FilePersistence = &feastdevv1alpha1.RegistryFilePersistence{} + services.Registry.Local.Persistence.FilePersistence = &feastdevv1.RegistryFilePersistence{} } if len(services.Registry.Local.Persistence.FilePersistence.Path) == 0 { @@ -139,12 
+139,12 @@ func ApplyDefaultsToStatus(cr *feastdevv1alpha1.FeatureStore) { if services.OfflineStore != nil { if services.OfflineStore.Persistence == nil { - services.OfflineStore.Persistence = &feastdevv1alpha1.OfflineStorePersistence{} + services.OfflineStore.Persistence = &feastdevv1.OfflineStorePersistence{} } if services.OfflineStore.Persistence.DBPersistence == nil { if services.OfflineStore.Persistence.FilePersistence == nil { - services.OfflineStore.Persistence.FilePersistence = &feastdevv1alpha1.OfflineStoreFilePersistence{} + services.OfflineStore.Persistence.FilePersistence = &feastdevv1.OfflineStoreFilePersistence{} } if len(services.OfflineStore.Persistence.FilePersistence.Type) == 0 { @@ -161,15 +161,15 @@ func ApplyDefaultsToStatus(cr *feastdevv1alpha1.FeatureStore) { // default to onlineStore service deployment if services.OnlineStore == nil { - services.OnlineStore = &feastdevv1alpha1.OnlineStore{} + services.OnlineStore = &feastdevv1.OnlineStore{} } if services.OnlineStore.Persistence == nil { - services.OnlineStore.Persistence = &feastdevv1alpha1.OnlineStorePersistence{} + services.OnlineStore.Persistence = &feastdevv1.OnlineStorePersistence{} } if services.OnlineStore.Persistence.DBPersistence == nil { if services.OnlineStore.Persistence.FilePersistence == nil { - services.OnlineStore.Persistence.FilePersistence = &feastdevv1alpha1.OnlineStoreFilePersistence{} + services.OnlineStore.Persistence.FilePersistence = &feastdevv1.OnlineStoreFilePersistence{} } if len(services.OnlineStore.Persistence.FilePersistence.Path) == 0 { @@ -180,7 +180,7 @@ func ApplyDefaultsToStatus(cr *feastdevv1alpha1.FeatureStore) { } if services.OnlineStore.Server == nil { - services.OnlineStore.Server = &feastdevv1alpha1.ServerConfigs{} + services.OnlineStore.Server = &feastdevv1.ServerConfigs{} } setDefaultCtrConfigs(&services.OnlineStore.Server.ContainerConfigs.DefaultCtrConfigs) @@ -189,12 +189,12 @@ func ApplyDefaultsToStatus(cr *feastdevv1alpha1.FeatureStore) { } if applied.CronJob == nil { - applied.CronJob = &feastdevv1alpha1.FeastCronJob{} + applied.CronJob = &feastdevv1.FeastCronJob{} } setDefaultCronJobConfigs(applied.CronJob) } -func setDefaultCtrConfigs(defaultConfigs *feastdevv1alpha1.DefaultCtrConfigs) { +func setDefaultCtrConfigs(defaultConfigs *feastdevv1.DefaultCtrConfigs) { if defaultConfigs.Image == nil { img := getFeatureServerImage() defaultConfigs.Image = &img @@ -209,7 +209,7 @@ func getFeatureServerImage() string { } func checkOfflineStoreFilePersistenceType(value string) error { - if slices.Contains(feastdevv1alpha1.ValidOfflineStoreFilePersistenceTypes, value) { + if slices.Contains(feastdevv1.ValidOfflineStoreFilePersistenceTypes, value) { return nil } return fmt.Errorf("invalid file type %s for offline store", value) @@ -224,7 +224,7 @@ func ensureRequestedStorage(resources *corev1.VolumeResourceRequirements, reques } } -func ensurePVCDefaults(pvc *feastdevv1alpha1.PvcConfig, feastType FeastServiceType) { +func ensurePVCDefaults(pvc *feastdevv1.PvcConfig, feastType FeastServiceType) { if pvc != nil { var storageRequest string switch feastType { @@ -244,7 +244,7 @@ func ensurePVCDefaults(pvc *feastdevv1alpha1.PvcConfig, feastType FeastServiceTy } } -func defaultOnlineStorePath(featureStore *feastdevv1alpha1.FeatureStore) string { +func defaultOnlineStorePath(featureStore *feastdevv1.FeatureStore) string { if _, ok := hasPvcConfig(featureStore, OnlineFeastType); ok { return DefaultOnlineStorePath } @@ -252,7 +252,7 @@ func defaultOnlineStorePath(featureStore 
*feastdevv1alpha1.FeatureStore) string return EphemeralPath + "/" + DefaultOnlineStorePath } -func defaultRegistryPath(featureStore *feastdevv1alpha1.FeatureStore) string { +func defaultRegistryPath(featureStore *feastdevv1.FeatureStore) string { if _, ok := hasPvcConfig(featureStore, RegistryFeastType); ok { return DefaultRegistryPath } @@ -261,21 +261,21 @@ func defaultRegistryPath(featureStore *feastdevv1alpha1.FeatureStore) string { } func checkOfflineStoreDBStorePersistenceType(value string) error { - if slices.Contains(feastdevv1alpha1.ValidOfflineStoreDBStorePersistenceTypes, value) { + if slices.Contains(feastdevv1.ValidOfflineStoreDBStorePersistenceTypes, value) { return nil } return fmt.Errorf("invalid DB store type %s for offline store", value) } func checkOnlineStoreDBStorePersistenceType(value string) error { - if slices.Contains(feastdevv1alpha1.ValidOnlineStoreDBStorePersistenceTypes, value) { + if slices.Contains(feastdevv1.ValidOnlineStoreDBStorePersistenceTypes, value) { return nil } return fmt.Errorf("invalid DB store type %s for online store", value) } func checkRegistryDBStorePersistenceType(value string) error { - if slices.Contains(feastdevv1alpha1.ValidRegistryDBStorePersistenceTypes, value) { + if slices.Contains(feastdevv1.ValidRegistryDBStorePersistenceTypes, value) { return nil } return fmt.Errorf("invalid DB store type %s for registry", value) @@ -438,19 +438,19 @@ func getContainerByType(feastType FeastServiceType, podSpec corev1.PodSpec) (int return -1, nil } -func GetRegistryVolume(featureStore *feastdevv1alpha1.FeatureStore, volumes []corev1.Volume) *corev1.Volume { +func GetRegistryVolume(featureStore *feastdevv1.FeatureStore, volumes []corev1.Volume) *corev1.Volume { return getVolumeByType(RegistryFeastType, featureStore, volumes) } -func GetOnlineVolume(featureStore *feastdevv1alpha1.FeatureStore, volumes []corev1.Volume) *corev1.Volume { +func GetOnlineVolume(featureStore *feastdevv1.FeatureStore, volumes []corev1.Volume) *corev1.Volume { return getVolumeByType(OnlineFeastType, featureStore, volumes) } -func GetOfflineVolume(featureStore *feastdevv1alpha1.FeatureStore, volumes []corev1.Volume) *corev1.Volume { +func GetOfflineVolume(featureStore *feastdevv1.FeatureStore, volumes []corev1.Volume) *corev1.Volume { return getVolumeByType(OfflineFeastType, featureStore, volumes) } -func getVolumeByType(feastType FeastServiceType, featureStore *feastdevv1alpha1.FeatureStore, volumes []corev1.Volume) *corev1.Volume { +func getVolumeByType(feastType FeastServiceType, featureStore *feastdevv1.FeatureStore, volumes []corev1.Volume) *corev1.Volume { for _, v := range volumes { if v.Name == GetFeastServiceName(featureStore, feastType) { return &v @@ -459,19 +459,19 @@ func getVolumeByType(feastType FeastServiceType, featureStore *feastdevv1alpha1. 
return nil } -func GetRegistryVolumeMount(featureStore *feastdevv1alpha1.FeatureStore, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount { +func GetRegistryVolumeMount(featureStore *feastdevv1.FeatureStore, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount { return getVolumeMountByType(RegistryFeastType, featureStore, volumeMounts) } -func GetOnlineVolumeMount(featureStore *feastdevv1alpha1.FeatureStore, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount { +func GetOnlineVolumeMount(featureStore *feastdevv1.FeatureStore, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount { return getVolumeMountByType(OnlineFeastType, featureStore, volumeMounts) } -func GetOfflineVolumeMount(featureStore *feastdevv1alpha1.FeatureStore, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount { +func GetOfflineVolumeMount(featureStore *feastdevv1.FeatureStore, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount { return getVolumeMountByType(OfflineFeastType, featureStore, volumeMounts) } -func getVolumeMountByType(feastType FeastServiceType, featureStore *feastdevv1alpha1.FeatureStore, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount { +func getVolumeMountByType(feastType FeastServiceType, featureStore *feastdevv1.FeatureStore, volumeMounts []corev1.VolumeMount) *corev1.VolumeMount { for _, vm := range volumeMounts { if vm.Name == GetFeastServiceName(featureStore, feastType) { return &vm diff --git a/infra/feast-operator/internal/controller/suite_test.go b/infra/feast-operator/internal/controller/suite_test.go index c0d3ed100c1..5a266abc8ba 100644 --- a/infra/feast-operator/internal/controller/suite_test.go +++ b/infra/feast-operator/internal/controller/suite_test.go @@ -36,7 +36,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" // +kubebuilder:scaffold:imports ) @@ -76,8 +75,6 @@ var _ = BeforeSuite(func() { err = feastdevv1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) - err = feastdevv1alpha1.AddToScheme(scheme.Scheme) - Expect(err).NotTo(HaveOccurred()) // +kubebuilder:scaffold:scheme diff --git a/infra/feast-operator/test/api/featurestore_types_test.go b/infra/feast-operator/test/api/featurestore_types_test.go index e8b08b549d0..d426c8e0d7e 100644 --- a/infra/feast-operator/test/api/featurestore_types_test.go +++ b/infra/feast-operator/test/api/featurestore_types_test.go @@ -12,7 +12,7 @@ import ( "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/log" - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,19 +22,19 @@ func boolPtr(b bool) *bool { return &b } -func createFeatureStore() *feastdevv1alpha1.FeatureStore { - return &feastdevv1alpha1.FeatureStore{ +func createFeatureStore() *feastdevv1.FeatureStore { + return &feastdevv1.FeatureStore{ ObjectMeta: metav1.ObjectMeta{ Name: resourceName, Namespace: namespaceName, }, - Spec: feastdevv1alpha1.FeatureStoreSpec{ + Spec: feastdevv1.FeatureStoreSpec{ FeastProject: "test_project", }, } } -func attemptInvalidCreationAndAsserts(ctx context.Context, featurestore *feastdevv1alpha1.FeatureStore, matcher string) { 
+func attemptInvalidCreationAndAsserts(ctx context.Context, featurestore *feastdevv1.FeatureStore, matcher string) { By("Creating the resource") logger := log.FromContext(ctx) logger.Info("Creating", "FeatureStore", featurestore) @@ -44,26 +44,26 @@ func attemptInvalidCreationAndAsserts(ctx context.Context, featurestore *feastde Expect(err.Error()).Should(ContainSubstring(matcher)) } -func onlineStoreWithAbsolutePathForPvc(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func onlineStoreWithAbsolutePathForPvc(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - OnlineStore: &feastdevv1alpha1.OnlineStore{ - Persistence: &feastdevv1alpha1.OnlineStorePersistence{ - FilePersistence: &feastdevv1alpha1.OnlineStoreFilePersistence{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + OnlineStore: &feastdevv1.OnlineStore{ + Persistence: &feastdevv1.OnlineStorePersistence{ + FilePersistence: &feastdevv1.OnlineStoreFilePersistence{ Path: "/data/online_store.db", - PvcConfig: &feastdevv1alpha1.PvcConfig{}, + PvcConfig: &feastdevv1.PvcConfig{}, }, }, }, } return fsCopy } -func onlineStoreWithRelativePathForEphemeral(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func onlineStoreWithRelativePathForEphemeral(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - OnlineStore: &feastdevv1alpha1.OnlineStore{ - Persistence: &feastdevv1alpha1.OnlineStorePersistence{ - FilePersistence: &feastdevv1alpha1.OnlineStoreFilePersistence{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + OnlineStore: &feastdevv1.OnlineStore{ + Persistence: &feastdevv1.OnlineStorePersistence{ + FilePersistence: &feastdevv1.OnlineStoreFilePersistence{ Path: "data/online_store.db", }, }, @@ -72,15 +72,15 @@ func onlineStoreWithRelativePathForEphemeral(featureStore *feastdevv1alpha1.Feat return fsCopy } -func onlineStoreWithObjectStoreBucketForPvc(path string, featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func onlineStoreWithObjectStoreBucketForPvc(path string, featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - OnlineStore: &feastdevv1alpha1.OnlineStore{ - Persistence: &feastdevv1alpha1.OnlineStorePersistence{ - FilePersistence: &feastdevv1alpha1.OnlineStoreFilePersistence{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + OnlineStore: &feastdevv1.OnlineStore{ + Persistence: &feastdevv1.OnlineStorePersistence{ + FilePersistence: &feastdevv1.OnlineStoreFilePersistence{ Path: path, - PvcConfig: &feastdevv1alpha1.PvcConfig{ - Create: &feastdevv1alpha1.PvcCreate{}, + PvcConfig: &feastdevv1.PvcConfig{ + Create: &feastdevv1.PvcCreate{}, MountPath: "/data/online", }, }, @@ -90,12 +90,12 @@ func onlineStoreWithObjectStoreBucketForPvc(path string, featureStore *feastdevv return fsCopy } -func offlineStoreWithUnmanagedFileType(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func offlineStoreWithUnmanagedFileType(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - OfflineStore: &feastdevv1alpha1.OfflineStore{ - Persistence: 
&feastdevv1alpha1.OfflineStorePersistence{ - FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + OfflineStore: &feastdevv1.OfflineStore{ + Persistence: &feastdevv1.OfflineStorePersistence{ + FilePersistence: &feastdevv1.OfflineStoreFilePersistence{ Type: "unmanaged", }, }, @@ -104,28 +104,28 @@ func offlineStoreWithUnmanagedFileType(featureStore *feastdevv1alpha1.FeatureSto return fsCopy } -func registryWithAbsolutePathForPvc(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func registryWithAbsolutePathForPvc(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Persistence: &feastdevv1alpha1.RegistryPersistence{ - FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Persistence: &feastdevv1.RegistryPersistence{ + FilePersistence: &feastdevv1.RegistryFilePersistence{ Path: "/data/registry.db", - PvcConfig: &feastdevv1alpha1.PvcConfig{}, + PvcConfig: &feastdevv1.PvcConfig{}, }}, }, }, } return fsCopy } -func registryWithRelativePathForEphemeral(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func registryWithRelativePathForEphemeral(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Persistence: &feastdevv1alpha1.RegistryPersistence{ - FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Persistence: &feastdevv1.RegistryPersistence{ + FilePersistence: &feastdevv1.RegistryFilePersistence{ Path: "data/online_store.db", }, }, @@ -134,16 +134,16 @@ func registryWithRelativePathForEphemeral(featureStore *feastdevv1alpha1.Feature } return fsCopy } -func registryWithObjectStoreBucketForPvc(path string, featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func registryWithObjectStoreBucketForPvc(path string, featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Persistence: &feastdevv1alpha1.RegistryPersistence{ - FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Persistence: &feastdevv1.RegistryPersistence{ + FilePersistence: &feastdevv1.RegistryFilePersistence{ Path: path, - PvcConfig: &feastdevv1alpha1.PvcConfig{ - Create: &feastdevv1alpha1.PvcCreate{}, + PvcConfig: &feastdevv1.PvcConfig{ + Create: &feastdevv1.PvcCreate{}, MountPath: "/data/registry", }, }, @@ -153,13 +153,13 @@ func registryWithObjectStoreBucketForPvc(path string, featureStore *feastdevv1al } return fsCopy } -func registryWithS3AdditionalKeywordsForFile(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func 
registryWithS3AdditionalKeywordsForFile(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Persistence: &feastdevv1alpha1.RegistryPersistence{ - FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Persistence: &feastdevv1.RegistryPersistence{ + FilePersistence: &feastdevv1.RegistryFilePersistence{ Path: "/data/online_store.db", S3AdditionalKwargs: &map[string]string{}, }, @@ -169,13 +169,13 @@ func registryWithS3AdditionalKeywordsForFile(featureStore *feastdevv1alpha1.Feat } return fsCopy } -func registryWithS3AdditionalKeywordsForGsBucket(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func registryWithS3AdditionalKeywordsForGsBucket(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Persistence: &feastdevv1alpha1.RegistryPersistence{ - FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Persistence: &feastdevv1.RegistryPersistence{ + FilePersistence: &feastdevv1.RegistryFilePersistence{ Path: "gs://online_store.db", S3AdditionalKwargs: &map[string]string{}, }, @@ -186,30 +186,30 @@ func registryWithS3AdditionalKeywordsForGsBucket(featureStore *feastdevv1alpha1. 
return fsCopy } -func pvcConfigWithNeitherRefNorCreate(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func pvcConfigWithNeitherRefNorCreate(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - OfflineStore: &feastdevv1alpha1.OfflineStore{ - Persistence: &feastdevv1alpha1.OfflineStorePersistence{ - FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ - PvcConfig: &feastdevv1alpha1.PvcConfig{}, + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + OfflineStore: &feastdevv1.OfflineStore{ + Persistence: &feastdevv1.OfflineStorePersistence{ + FilePersistence: &feastdevv1.OfflineStoreFilePersistence{ + PvcConfig: &feastdevv1.PvcConfig{}, }, }, }, } return fsCopy } -func pvcConfigWithBothRefAndCreate(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func pvcConfigWithBothRefAndCreate(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - OfflineStore: &feastdevv1alpha1.OfflineStore{ - Persistence: &feastdevv1alpha1.OfflineStorePersistence{ - FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ - PvcConfig: &feastdevv1alpha1.PvcConfig{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + OfflineStore: &feastdevv1.OfflineStore{ + Persistence: &feastdevv1.OfflineStorePersistence{ + FilePersistence: &feastdevv1.OfflineStoreFilePersistence{ + PvcConfig: &feastdevv1.PvcConfig{ Ref: &corev1.LocalObjectReference{ Name: "pvc", }, - Create: &feastdevv1alpha1.PvcCreate{}, + Create: &feastdevv1.PvcCreate{}, }, }, }, @@ -218,35 +218,35 @@ func pvcConfigWithBothRefAndCreate(featureStore *feastdevv1alpha1.FeatureStore) return fsCopy } -func pvcConfigWithNoResources(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func pvcConfigWithNoResources(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - OfflineStore: &feastdevv1alpha1.OfflineStore{ - Persistence: &feastdevv1alpha1.OfflineStorePersistence{ - FilePersistence: &feastdevv1alpha1.OfflineStoreFilePersistence{ - PvcConfig: &feastdevv1alpha1.PvcConfig{ - Create: &feastdevv1alpha1.PvcCreate{}, + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + OfflineStore: &feastdevv1.OfflineStore{ + Persistence: &feastdevv1.OfflineStorePersistence{ + FilePersistence: &feastdevv1.OfflineStoreFilePersistence{ + PvcConfig: &feastdevv1.PvcConfig{ + Create: &feastdevv1.PvcCreate{}, MountPath: "/data/offline", }, }, }, }, - OnlineStore: &feastdevv1alpha1.OnlineStore{ - Persistence: &feastdevv1alpha1.OnlineStorePersistence{ - FilePersistence: &feastdevv1alpha1.OnlineStoreFilePersistence{ - PvcConfig: &feastdevv1alpha1.PvcConfig{ - Create: &feastdevv1alpha1.PvcCreate{}, + OnlineStore: &feastdevv1.OnlineStore{ + Persistence: &feastdevv1.OnlineStorePersistence{ + FilePersistence: &feastdevv1.OnlineStoreFilePersistence{ + PvcConfig: &feastdevv1.PvcConfig{ + Create: &feastdevv1.PvcCreate{}, MountPath: "/data/online", }, }, }, }, - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Persistence: &feastdevv1alpha1.RegistryPersistence{ - FilePersistence: &feastdevv1alpha1.RegistryFilePersistence{ - PvcConfig: &feastdevv1alpha1.PvcConfig{ - Create: &feastdevv1alpha1.PvcCreate{}, + 
Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Persistence: &feastdevv1.RegistryPersistence{ + FilePersistence: &feastdevv1.RegistryFilePersistence{ + PvcConfig: &feastdevv1.PvcConfig{ + Create: &feastdevv1.PvcCreate{}, MountPath: "/data/registry", }, }, @@ -257,7 +257,7 @@ func pvcConfigWithNoResources(featureStore *feastdevv1alpha1.FeatureStore) *feas return fsCopy } -func pvcConfigWithResources(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func pvcConfigWithResources(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := pvcConfigWithNoResources(featureStore) fsCopy.Spec.Services.OfflineStore.Persistence.FilePersistence.PvcConfig.Create.Resources = corev1.VolumeResourceRequirements{ Requests: corev1.ResourceList{ @@ -277,31 +277,31 @@ func pvcConfigWithResources(featureStore *feastdevv1alpha1.FeatureStore) *feastd return fsCopy } -func authzConfigWithKubernetes(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func authzConfigWithKubernetes(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() if fsCopy.Spec.AuthzConfig == nil { - fsCopy.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{} + fsCopy.Spec.AuthzConfig = &feastdevv1.AuthzConfig{} } - fsCopy.Spec.AuthzConfig.KubernetesAuthz = &feastdevv1alpha1.KubernetesAuthz{ + fsCopy.Spec.AuthzConfig.KubernetesAuthz = &feastdevv1.KubernetesAuthz{ Roles: []string{}, } return fsCopy } -func authzConfigWithOidc(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func authzConfigWithOidc(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() if fsCopy.Spec.AuthzConfig == nil { - fsCopy.Spec.AuthzConfig = &feastdevv1alpha1.AuthzConfig{} + fsCopy.Spec.AuthzConfig = &feastdevv1.AuthzConfig{} } - fsCopy.Spec.AuthzConfig.OidcAuthz = &feastdevv1alpha1.OidcAuthz{} + fsCopy.Spec.AuthzConfig.OidcAuthz = &feastdevv1.OidcAuthz{} return fsCopy } -func onlineStoreWithDBPersistenceType(dbPersistenceType string, featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func onlineStoreWithDBPersistenceType(dbPersistenceType string, featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - OnlineStore: &feastdevv1alpha1.OnlineStore{ - Persistence: &feastdevv1alpha1.OnlineStorePersistence{ - DBPersistence: &feastdevv1alpha1.OnlineStoreDBStorePersistence{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + OnlineStore: &feastdevv1.OnlineStore{ + Persistence: &feastdevv1.OnlineStorePersistence{ + DBPersistence: &feastdevv1.OnlineStoreDBStorePersistence{ Type: dbPersistenceType, }, }, @@ -310,12 +310,12 @@ func onlineStoreWithDBPersistenceType(dbPersistenceType string, featureStore *fe return fsCopy } -func offlineStoreWithDBPersistenceType(dbPersistenceType string, featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func offlineStoreWithDBPersistenceType(dbPersistenceType string, featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - OfflineStore: &feastdevv1alpha1.OfflineStore{ - Persistence: &feastdevv1alpha1.OfflineStorePersistence{ - DBPersistence: &feastdevv1alpha1.OfflineStoreDBStorePersistence{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + 
OfflineStore: &feastdevv1.OfflineStore{ + Persistence: &feastdevv1.OfflineStorePersistence{ + DBPersistence: &feastdevv1.OfflineStoreDBStorePersistence{ Type: dbPersistenceType, }, }, @@ -324,13 +324,13 @@ func offlineStoreWithDBPersistenceType(dbPersistenceType string, featureStore *f return fsCopy } -func registryStoreWithDBPersistenceType(dbPersistenceType string, featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func registryStoreWithDBPersistenceType(dbPersistenceType string, featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Persistence: &feastdevv1alpha1.RegistryPersistence{ - DBPersistence: &feastdevv1alpha1.RegistryDBStorePersistence{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Persistence: &feastdevv1.RegistryPersistence{ + DBPersistence: &feastdevv1.RegistryDBStorePersistence{ Type: dbPersistenceType, }, }, @@ -340,12 +340,12 @@ func registryStoreWithDBPersistenceType(dbPersistenceType string, featureStore * return fsCopy } -func registryWithRestAPIFalse(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func registryWithRestAPIFalse(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{ RestAPI: boolPtr(false), }, }, @@ -354,12 +354,12 @@ func registryWithRestAPIFalse(featureStore *feastdevv1alpha1.FeatureStore) *feas return fsCopy } -func registryWithOnlyRestAPI(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func registryWithOnlyRestAPI(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{ RestAPI: boolPtr(true), }, }, @@ -368,12 +368,12 @@ func registryWithOnlyRestAPI(featureStore *feastdevv1alpha1.FeatureStore) *feast return fsCopy } -func registryWithOnlyGRPC(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func registryWithOnlyGRPC(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{ GRPC: boolPtr(true), }, }, @@ -382,12 +382,12 @@ func registryWithOnlyGRPC(featureStore 
*feastdevv1alpha1.FeatureStore) *feastdev return fsCopy } -func registryWithBothAPIs(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func registryWithBothAPIs(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{ RestAPI: boolPtr(true), GRPC: boolPtr(true), }, @@ -397,24 +397,24 @@ func registryWithBothAPIs(featureStore *feastdevv1alpha1.FeatureStore) *feastdev return fsCopy } -func registryWithNoAPIs(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func registryWithNoAPIs(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{}, + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{}, }, }, } return fsCopy } -func registryWithBothFalse(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func registryWithBothFalse(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{ RestAPI: boolPtr(false), GRPC: boolPtr(false), }, @@ -424,12 +424,12 @@ func registryWithBothFalse(featureStore *feastdevv1alpha1.FeatureStore) *feastde return fsCopy } -func registryWithGRPCFalse(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func registryWithGRPCFalse(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.Services = &feastdevv1alpha1.FeatureStoreServices{ - Registry: &feastdevv1alpha1.Registry{ - Local: &feastdevv1alpha1.LocalRegistryConfig{ - Server: &feastdevv1alpha1.RegistryServerConfigs{ + fsCopy.Spec.Services = &feastdevv1.FeatureStoreServices{ + Registry: &feastdevv1.Registry{ + Local: &feastdevv1.LocalRegistryConfig{ + Server: &feastdevv1.RegistryServerConfigs{ GRPC: boolPtr(false), }, }, @@ -438,9 +438,9 @@ func registryWithGRPCFalse(featureStore *feastdevv1alpha1.FeatureStore) *feastde return fsCopy } -func cronJobWithAnnotations(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func cronJobWithAnnotations(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.CronJob = &feastdevv1alpha1.FeastCronJob{ + fsCopy.Spec.CronJob = &feastdevv1.FeastCronJob{ Annotations: map[string]string{ "test-annotation": "test-value", "another-annotation": "another-value", @@ -450,18 +450,18 @@ func cronJobWithAnnotations(featureStore 
*feastdevv1alpha1.FeatureStore) *feastd return fsCopy } -func cronJobWithEmptyAnnotations(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func cronJobWithEmptyAnnotations(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.CronJob = &feastdevv1alpha1.FeastCronJob{ + fsCopy.Spec.CronJob = &feastdevv1.FeastCronJob{ Annotations: map[string]string{}, Schedule: "0 0 * * *", } return fsCopy } -func cronJobWithoutAnnotations(featureStore *feastdevv1alpha1.FeatureStore) *feastdevv1alpha1.FeatureStore { +func cronJobWithoutAnnotations(featureStore *feastdevv1.FeatureStore) *feastdevv1.FeatureStore { fsCopy := featureStore.DeepCopy() - fsCopy.Spec.CronJob = &feastdevv1alpha1.FeastCronJob{ + fsCopy.Spec.CronJob = &feastdevv1.FeastCronJob{ Schedule: "0 0 * * *", } return fsCopy @@ -485,7 +485,7 @@ var typeNamespacedName = types.NamespacedName{ Namespace: "default", } -func initContext() (context.Context, *feastdevv1alpha1.FeatureStore) { +func initContext() (context.Context, *feastdevv1.FeatureStore) { ctx := context.Background() featurestore := createFeatureStore() @@ -520,7 +520,7 @@ var _ = Describe("FeatureStore API", func() { }) It("should fail when db persistence type is invalid", func() { - attemptInvalidCreationAndAsserts(ctx, onlineStoreWithDBPersistenceType("invalid", featurestore), "Unsupported value: \"invalid\": supported values: "+quotedSlice(feastdevv1alpha1.ValidOnlineStoreDBStorePersistenceTypes)) + attemptInvalidCreationAndAsserts(ctx, onlineStoreWithDBPersistenceType("invalid", featurestore), "Unsupported value: \"invalid\": supported values: "+quotedSlice(feastdevv1.ValidOnlineStoreDBStorePersistenceTypes)) }) }) @@ -531,7 +531,7 @@ var _ = Describe("FeatureStore API", func() { attemptInvalidCreationAndAsserts(ctx, offlineStoreWithUnmanagedFileType(featurestore), "Unsupported value") }) It("should fail when db persistence type is invalid", func() { - attemptInvalidCreationAndAsserts(ctx, offlineStoreWithDBPersistenceType("invalid", featurestore), "Unsupported value: \"invalid\": supported values: "+quotedSlice(feastdevv1alpha1.ValidOfflineStoreDBStorePersistenceTypes)) + attemptInvalidCreationAndAsserts(ctx, offlineStoreWithDBPersistenceType("invalid", featurestore), "Unsupported value: \"invalid\": supported values: "+quotedSlice(feastdevv1.ValidOfflineStoreDBStorePersistenceTypes)) }) }) @@ -553,7 +553,7 @@ var _ = Describe("FeatureStore API", func() { attemptInvalidCreationAndAsserts(ctx, registryWithS3AdditionalKeywordsForGsBucket(featurestore), "Additional S3 settings are available only for S3 object store URIs") }) It("should fail when db persistence type is invalid", func() { - attemptInvalidCreationAndAsserts(ctx, registryStoreWithDBPersistenceType("invalid", featurestore), "Unsupported value: \"invalid\": supported values: "+quotedSlice(feastdevv1alpha1.ValidRegistryDBStorePersistenceTypes)) + attemptInvalidCreationAndAsserts(ctx, registryStoreWithDBPersistenceType("invalid", featurestore), "Unsupported value: \"invalid\": supported values: "+quotedSlice(feastdevv1.ValidRegistryDBStorePersistenceTypes)) }) }) @@ -613,13 +613,13 @@ var _ = Describe("FeatureStore API", func() { BeforeEach(func() { By("verifying the custom resource FeatureStore is not there") - resource := &feastdevv1alpha1.FeatureStore{} + resource := &feastdevv1.FeatureStore{} err := k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err != nil && errors.IsNotFound(err)).To(BeTrue()) }) AfterEach(func() { 
By("Cleaning up the test resource") - resource := &feastdevv1alpha1.FeatureStore{} + resource := &feastdevv1.FeatureStore{} err := k8sClient.Get(ctx, typeNamespacedName, resource) if err == nil { Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) @@ -680,13 +680,13 @@ var _ = Describe("FeatureStore API", func() { BeforeEach(func() { By("verifying the custom resource FeatureStore is not there") - resource := &feastdevv1alpha1.FeatureStore{} + resource := &feastdevv1.FeatureStore{} err := k8sClient.Get(ctx, typeNamespacedName, resource) Expect(err != nil && errors.IsNotFound(err)).To(BeTrue()) }) AfterEach(func() { By("Cleaning up the test resource") - resource := &feastdevv1alpha1.FeatureStore{} + resource := &feastdevv1.FeatureStore{} err := k8sClient.Get(ctx, typeNamespacedName, resource) if err == nil { Expect(k8sClient.Delete(ctx, resource)).To(Succeed()) diff --git a/infra/feast-operator/test/api/suite_test.go b/infra/feast-operator/test/api/suite_test.go index e8c46a240c1..558068a7957 100644 --- a/infra/feast-operator/test/api/suite_test.go +++ b/infra/feast-operator/test/api/suite_test.go @@ -25,7 +25,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" @@ -71,7 +71,7 @@ var _ = BeforeSuite(func() { Expect(err).NotTo(HaveOccurred()) Expect(cfg).NotTo(BeNil()) - err = feastdevv1alpha1.AddToScheme(scheme.Scheme) + err = feastdevv1.AddToScheme(scheme.Scheme) Expect(err).NotTo(HaveOccurred()) // +kubebuilder:scaffold:scheme diff --git a/infra/feast-operator/test/data-source-types/data_source_types_test.go b/infra/feast-operator/test/data-source-types/data_source_types_test.go index 8448b2c4212..09d71239349 100644 --- a/infra/feast-operator/test/data-source-types/data_source_types_test.go +++ b/infra/feast-operator/test/data-source-types/data_source_types_test.go @@ -9,7 +9,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" - feastdevv1alpha1 "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" "github.com/feast-dev/feast/infra/feast-operator/internal/controller/services" ) @@ -23,16 +23,16 @@ var _ = Describe("FeatureStore Data Source Types", func() { Context("When checking against the python code in feast.repo_config", func() { It("should match defined registry persistence types in the operator", func() { registryFilePersistenceTypes := []string{string(services.RegistryFileConfigType)} - registryPersistenceTypes := append(feastdevv1alpha1.ValidRegistryDBStorePersistenceTypes, registryFilePersistenceTypes...) + registryPersistenceTypes := append(feastdevv1.ValidRegistryDBStorePersistenceTypes, registryFilePersistenceTypes...) checkPythonPersistenceTypes("registry.out", registryPersistenceTypes) }) It("should match defined onlineStore persistence types in the operator", func() { onlineFilePersistenceTypes := []string{string(services.OnlineSqliteConfigType)} - onlinePersistenceTypes := append(feastdevv1alpha1.ValidOnlineStoreDBStorePersistenceTypes, onlineFilePersistenceTypes...) + onlinePersistenceTypes := append(feastdevv1.ValidOnlineStoreDBStorePersistenceTypes, onlineFilePersistenceTypes...) 
checkPythonPersistenceTypes("online-store.out", onlinePersistenceTypes) }) It("should match defined offlineStore persistence types in the operator", func() { - offlinePersistenceTypes := append(feastdevv1alpha1.ValidOfflineStoreDBStorePersistenceTypes, feastdevv1alpha1.ValidOfflineStoreFilePersistenceTypes...) + offlinePersistenceTypes := append(feastdevv1.ValidOfflineStoreDBStorePersistenceTypes, feastdevv1.ValidOfflineStoreFilePersistenceTypes...) checkPythonPersistenceTypes("offline-store.out", offlinePersistenceTypes) }) }) diff --git a/infra/feast-operator/test/e2e/e2e_test.go b/infra/feast-operator/test/e2e/e2e_test.go index fb2ce69992a..4515dcccc9f 100644 --- a/infra/feast-operator/test/e2e/e2e_test.go +++ b/infra/feast-operator/test/e2e/e2e_test.go @@ -40,12 +40,12 @@ var _ = Describe("controller", Ordered, func() { applyAndMaterializeTest := "TestApplyAndMaterializeFeastDefinitions" runTestDeploySimpleCRFunc := utils.GetTestDeploySimpleCRFunc("/test/e2e", - "test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml", + "test/testdata/feast_integration_test_crs/v1_default_featurestore.yaml", featureStoreName, feastResourceName, feastK8sResourceNames, namespace) runTestWithRemoteRegistryFunction := utils.GetTestWithRemoteRegistryFunc("/test/e2e", - "test/testdata/feast_integration_test_crs/v1alpha1_default_featurestore.yaml", - "test/testdata/feast_integration_test_crs/v1alpha1_remote_registry_featurestore.yaml", + "test/testdata/feast_integration_test_crs/v1_default_featurestore.yaml", + "test/testdata/feast_integration_test_crs/v1_remote_registry_featurestore.yaml", featureStoreName, feastResourceName, feastK8sResourceNames, namespace) runTestApplyAndMaterializeFunc := utils.RunTestApplyAndMaterializeFunc("/test/e2e", namespace, "credit-scoring", utils.FeastPrefix+"credit-scoring") diff --git a/infra/feast-operator/test/e2e_rhoai/feast_wb_connection_integration_test.go b/infra/feast-operator/test/e2e_rhoai/feast_wb_connection_integration_test.go index a284dc9e839..5b42bfe1276 100644 --- a/infra/feast-operator/test/e2e_rhoai/feast_wb_connection_integration_test.go +++ b/infra/feast-operator/test/e2e_rhoai/feast_wb_connection_integration_test.go @@ -43,16 +43,6 @@ var _ = Describe("Feast Workbench Integration Connection Testing", Ordered, func feastCRName = "credit-scoring" ) - // Create and monitor notebook - createAndMonitorNotebook := func() { - nbParams := utils.GetNotebookParams(namespace, configMapName, notebookPVC, notebookName, testDir) - By("Creating Jupyter Notebook") - Expect(utils.CreateNotebook(nbParams)).To(Succeed(), "Failed to create notebook") - - By("Monitoring notebook logs") - Expect(utils.MonitorNotebookPod(namespace, "jupyter-nb-", notebookName)).To(Succeed(), "Notebook execution failed") - } - // Parameterized test function that handles both auth and non-auth scenarios runFeastWorkbenchIntegration := func(authEnabled bool) { // Apply permissions only if auth is enabled @@ -61,24 +51,8 @@ var _ = Describe("Feast Workbench Integration Connection Testing", Ordered, func utils.ApplyFeastPermissions(permissionFile, "/feast-data/credit_scoring_local/feature_repo/permissions.py", namespace, feastDeploymentName) } - By(fmt.Sprintf("Setting namespace context to : %s", namespace)) - Expect(utils.SetNamespaceContext(namespace, testDir)).To(Succeed()) - fmt.Printf("Successfully set namespace context to: %s\n", namespace) - - By(fmt.Sprintf("Creating Config map: %s", configMapName)) - Expect(utils.CreateNotebookConfigMap(namespace, configMapName, 
notebookFile, "test/e2e_rhoai/resources/feature_repo", testDir)).To(Succeed()) - fmt.Printf("ConfigMap %s created successfully\n", configMapName) - - By(fmt.Sprintf("Creating Persistent volume claim: %s", notebookPVC)) - Expect(utils.CreateNotebookPVC(pvcFile, testDir)).To(Succeed()) - fmt.Printf("Persistent Volume Claim %s created successfully\n", notebookPVC) - - By(fmt.Sprintf("Creating rolebinding %s for the user", rolebindingName)) - Expect(utils.CreateNotebookRoleBinding(namespace, rolebindingName, utils.GetOCUser(testDir), testDir)).To(Succeed()) - fmt.Printf("Created rolebinding %s successfully\n", rolebindingName) - - // Create and monitor notebook for execution status - createAndMonitorNotebook() + // Use the shared RunNotebookTest function for common setup and execution + utils.RunNotebookTest(namespace, configMapName, notebookFile, "test/e2e_rhoai/resources/feature_repo", pvcFile, rolebindingName, notebookPVC, notebookName, testDir) } BeforeAll(func() { diff --git a/infra/feast-operator/test/e2e_rhoai/feast_wb_milvus_test.go b/infra/feast-operator/test/e2e_rhoai/feast_wb_milvus_test.go index 91e5e4c2e41..4849670399d 100644 --- a/infra/feast-operator/test/e2e_rhoai/feast_wb_milvus_test.go +++ b/infra/feast-operator/test/e2e_rhoai/feast_wb_milvus_test.go @@ -52,34 +52,9 @@ var _ = Describe("Feast Jupyter Notebook Testing", Ordered, func() { fmt.Printf("Namespace %s deleted successfully\n", namespace) }) - runNotebookTest := func() { - // Execute common setup steps - By(fmt.Sprintf("Setting namespace context to : %s", namespace)) - Expect(utils.SetNamespaceContext(namespace, testDir)).To(Succeed()) - fmt.Printf("Successfully set namespace context to: %s\n", namespace) - - By(fmt.Sprintf("Creating Config map: %s", configMapName)) - Expect(utils.CreateNotebookConfigMap(namespace, configMapName, notebookFile, "test/e2e_rhoai/resources/feature_repo", testDir)).To(Succeed()) - fmt.Printf("ConfigMap %s created successfully\n", configMapName) - - By(fmt.Sprintf("Creating Persistent volume claim: %s", notebookPVC)) - Expect(utils.CreateNotebookPVC(pvcFile, testDir)).To(Succeed()) - fmt.Printf("Persistent Volume Claim %s created successfully\n", notebookPVC) - - By(fmt.Sprintf("Creating rolebinding %s for the user", rolebindingName)) - Expect(utils.CreateNotebookRoleBinding(namespace, rolebindingName, utils.GetOCUser(testDir), testDir)).To(Succeed()) - fmt.Printf("Created rolebinding %s successfully\n", rolebindingName) - - // Build notebook parameters and create notebook - nbParams := utils.GetNotebookParams(namespace, configMapName, notebookPVC, notebookName, testDir) - By("Creating Jupyter Notebook") - Expect(utils.CreateNotebook(nbParams)).To(Succeed(), "Failed to create notebook") - - By("Monitoring notebook logs") - Expect(utils.MonitorNotebookPod(namespace, "jupyter-nb-", notebookName)).To(Succeed(), "Notebook execution failed") - } - Context("Feast Jupyter Notebook Test", func() { - It("Should create and run a "+feastMilvusTest+" successfully", runNotebookTest) + It("Should create and run a "+feastMilvusTest+" successfully", func() { + utils.RunNotebookTest(namespace, configMapName, notebookFile, "test/e2e_rhoai/resources/feature_repo", pvcFile, rolebindingName, notebookPVC, notebookName, testDir) + }) }) }) diff --git a/infra/feast-operator/test/e2e_rhoai/feast_wb_ray_offline_store_test.go b/infra/feast-operator/test/e2e_rhoai/feast_wb_ray_offline_store_test.go new file mode 100644 index 00000000000..a7e482063f0 --- /dev/null +++ 
b/infra/feast-operator/test/e2e_rhoai/feast_wb_ray_offline_store_test.go @@ -0,0 +1,77 @@ +/* +Copyright 2025 Feast Community. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package e2erhoai provides end-to-end (E2E) test coverage for Feast integration with +// Red Hat OpenShift AI (RHOAI) environments. This specific test validates the functionality +// of executing a Feast Jupyter notebook with Ray offline store within a fully configured OpenShift namespace +package e2erhoai + +import ( + "fmt" + "os/exec" + + utils "github.com/feast-dev/feast/infra/feast-operator/test/utils" + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" +) + +var _ = Describe("Feast Jupyter Notebook Testing with Ray Offline Store", Ordered, func() { + const ( + namespace = "test-ns-feast-wb-ray" + configMapName = "feast-wb-ray-cm" + rolebindingName = "rb-feast-ray-test" + notebookFile = "test/e2e_rhoai/resources/feast-wb-ray-test.ipynb" + pvcFile = "test/e2e_rhoai/resources/pvc.yaml" + kueueResourcesFile = "test/e2e_rhoai/resources/kueue_resources_setup.yaml" + notebookPVC = "jupyterhub-nb-kube-3aadmin-pvc" + testDir = "/test/e2e_rhoai" + notebookName = "feast-wb-ray-test.ipynb" + feastRayTest = "TestFeastRayOfflineStoreNotebook" + ) + + BeforeAll(func() { + By(fmt.Sprintf("Creating test namespace: %s", namespace)) + Expect(utils.CreateNamespace(namespace, testDir)).To(Succeed()) + fmt.Printf("Namespace %s created successfully\n", namespace) + + By("Applying Kueue resources setup") + // Apply with namespace flag - cluster-scoped resources (ResourceFlavor, ClusterQueue) will be applied at cluster level, + // and namespace-scoped resources (LocalQueue) will be applied in the specified namespace + cmd := exec.Command("kubectl", "apply", "-f", kueueResourcesFile, "-n", namespace) + output, err := utils.Run(cmd, testDir) + Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("Failed to apply Kueue resources: %v\nOutput: %s", err, output)) + fmt.Printf("Kueue resources applied successfully\n") + }) + + AfterAll(func() { + By("Deleting Kueue resources") + // Delete with namespace flag - will delete namespace-scoped resources from the namespace + // and cluster-scoped resources from the cluster + cmd := exec.Command("kubectl", "delete", "-f", kueueResourcesFile, "-n", namespace, "--ignore-not-found=true") + _, _ = utils.Run(cmd, testDir) + fmt.Printf("Kueue resources cleanup completed\n") + + By(fmt.Sprintf("Deleting test namespace: %s", namespace)) + Expect(utils.DeleteNamespace(namespace, testDir)).To(Succeed()) + fmt.Printf("Namespace %s deleted successfully\n", namespace) + }) + + Context("Feast Jupyter Notebook Test with Ray Offline store", func() { + It("Should create and run a "+feastRayTest+" successfully", func() { + utils.RunNotebookTest(namespace, configMapName, notebookFile, "test/e2e_rhoai/resources/feature_repo", pvcFile, rolebindingName, notebookPVC, notebookName, testDir) + }) + }) +}) diff --git a/infra/feast-operator/test/e2e_rhoai/resources/feast-wb-ray-test.ipynb 
b/infra/feast-operator/test/e2e_rhoai/resources/feast-wb-ray-test.ipynb new file mode 100644 index 00000000000..3b91bcccd8e --- /dev/null +++ b/infra/feast-operator/test/e2e_rhoai/resources/feast-wb-ray-test.ipynb @@ -0,0 +1,516 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# --- Configuration Variables ---\n", + "import os \n", + "\n", + "# Namespace where your resources exist\n", + "namespace = os.environ.get(\"NAMESPACE\")\n", + "\n", + "fsconfigmap = \"cm-fs-data\"\n", + "\n", + "# Fetch token and server directly from oc CLI\n", + "import subprocess\n", + "\n", + "def oc(cmd):\n", + " return subprocess.check_output(cmd, shell=True).decode(\"utf-8\").strip()\n", + "\n", + "token = oc(\"oc whoami -t\")\n", + "server = oc(\"oc whoami --show-server\")\n", + "\n", + "os.environ[\"CLUSTER_TOKEN\"] = token\n", + "os.environ[\"CLUSTER_SERVER\"] = server\n", + "\n", + "\n", + "# RayCluster name\n", + "raycluster = \"feastraytest\"\n", + "os.environ[\"RAY_CLUSTER\"] = raycluster\n", + "\n", + "# Show configured values\n", + "print(\"Configuration Variables:\")\n", + "print(f\" Namespace: {namespace}\")\n", + "print(f\" Server: {server}\")\n", + "print(f\" Token: {'*' * 20}\") # hide actual token\n", + "print(f\" Ray Cluster: {raycluster}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "! git clone https://github.com/Srihari1192/feast-rag-ray.git" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%cd feast-rag-ray/feature_repo" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!oc login --token=$token --server=$server" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!oc create configmap $fsconfigmap --from-file=data/customer_daily_profile.parquet --from-file=data/driver_stats.parquet -n $namespace" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Import pieces from codeflare-sdk\n", + "from codeflare_sdk import Cluster, ClusterConfiguration, TokenAuthentication\n", + "\n", + "# Create authentication with token and server from oc\n", + "auth = TokenAuthentication(\n", + " token=token,\n", + " server=server,\n", + " skip_tls=True\n", + ")\n", + "auth.login()\n", + "print(\"āœ“ Authentication successful\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from kubernetes.client import (\n", + " V1Volume,\n", + " V1ConfigMapVolumeSource,\n", + " V1VolumeMount,\n", + ") \n", + "\n", + "data_volume = V1Volume(\n", + " name=\"data\",\n", + " config_map=V1ConfigMapVolumeSource(name=fsconfigmap)\n", + ")\n", + "\n", + "data_mount = V1VolumeMount(\n", + " name=\"data\",\n", + " mount_path=\"/opt/app-root/src/feast-rag-ray/feature_repo/data\",\n", + " read_only=True\n", + ")\n", + "\n", + "cluster = Cluster(ClusterConfiguration(\n", + " name=raycluster,\n", + " head_cpu_requests=1,\n", + " head_cpu_limits=1,\n", + " head_memory_requests=4,\n", + " head_memory_limits=4,\n", + " head_extended_resource_requests={'nvidia.com/gpu':0}, # For GPU enabled workloads set the head_extended_resource_requests and worker_extended_resource_requests\n", + " worker_extended_resource_requests={'nvidia.com/gpu':0},\n", + " num_workers=2,\n", + " 
worker_cpu_requests='250m',\n", + " worker_cpu_limits=1,\n", + " worker_memory_requests=4,\n", + " worker_memory_limits=4,\n", + " # image=\"\", # Optional Field \n", + " write_to_file=False, # When enabled Ray Cluster yaml files are written to /HOME/.codeflare/resources\n", + " local_queue=\"fs-user-queue\", # Specify the local queue manually\n", + " # ⭐ Best method: Use secretKeyRef to expose AWS credentials safely\n", + " volumes=[data_volume],\n", + " volume_mounts=[data_mount],\n", + " \n", + "))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cluster.apply()\n", + "# cluster.wait_ready()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import time\n", + "\n", + "MAX_WAIT = 180 # 3 minutes\n", + "INTERVAL = 5 # check every 5 seconds\n", + "elapsed = 0\n", + "\n", + "print(\"ā³ Waiting up to 3 minutes for RayCluster to be READY...\\n\")\n", + "\n", + "while elapsed < MAX_WAIT:\n", + " details = cluster.details()\n", + " status = details.status.value\n", + "\n", + " print(details)\n", + " print(\"Cluster Status:\", status)\n", + "\n", + " if status == \"ready\":\n", + " print(\"āœ… RayCluster is READY!\")\n", + " break\n", + " \n", + " print(f\"ā³ RayCluster is NOT ready yet: {status} ... checking again in {INTERVAL}s\\n\")\n", + " time.sleep(INTERVAL)\n", + " elapsed += INTERVAL\n", + "\n", + "else:\n", + " print(\"āŒ Timeout: RayCluster did NOT become READY within 3 minutes.\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "! feast apply" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "from pathlib import Path\n", + "from feast import FeatureStore\n", + "\n", + "# Add feature repo to PYTHONPATH\n", + "repo_path = Path(\".\")\n", + "sys.path.append(str(repo_path))\n", + "\n", + "# Initialize Feature Store\n", + "print(\"Initializing Feast with Ray configuration...\")\n", + "store = FeatureStore(repo_path=\".\")\n", + "\n", + "# Assertions: Verify store is initialized correctly\n", + "assert store is not None, \"FeatureStore should be initialized\"\n", + "assert store.config is not None, \"Store config should be available\"\n", + "assert store.config.offline_store is not None, \"Offline store should be configured\"\n", + "\n", + "print(f\"āœ“ Offline store: {store.config.offline_store.type}\")\n", + "if hasattr(store.config, \"batch_engine\") and store.config.batch_engine:\n", + " print(f\"āœ“ Compute engine: {store.config.batch_engine.type}\")\n", + " # Assertion: Verify batch engine is configured if present\n", + " assert store.config.batch_engine.type is not None, \"Batch engine type should be set\"\n", + "else:\n", + " print(\"⚠ No compute engine configured\")\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. 
Create Entity DataFrame\n", + "\n", + "Create an entity DataFrame for historical feature retrieval with point-in-time timestamps.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from datetime import datetime, timedelta\n", + "import pandas as pd\n", + "\n", + "# --- Create time window ---\n", + "end_date = datetime.now().replace(microsecond=0, second=0, minute=0)\n", + "start_date = end_date - timedelta(days=2)\n", + "\n", + "\n", + "entity_df = pd.DataFrame(\n", + " {\n", + " \"driver_id\": [1001, 1002, 1003],\n", + " \"customer_id\": [2001, 2002, 2003],\n", + " \"event_timestamp\": [\n", + " pd.Timestamp(end_date - timedelta(hours=24), tz=\"UTC\"),\n", + " pd.Timestamp(end_date - timedelta(hours=12), tz=\"UTC\"),\n", + " pd.Timestamp(end_date - timedelta(hours=6), tz=\"UTC\"),\n", + " ],\n", + " }\n", + ")\n", + "\n", + "# Assertions: Verify entity DataFrame is created correctly\n", + "assert len(entity_df) == 3, f\"Expected 3 rows, got {len(entity_df)}\"\n", + "assert \"driver_id\" in entity_df.columns, \"driver_id column should be present\"\n", + "assert \"customer_id\" in entity_df.columns, \"customer_id column should be present\"\n", + "assert \"event_timestamp\" in entity_df.columns, \"event_timestamp column should be present\"\n", + "assert all(entity_df[\"driver_id\"].isin([1001, 1002, 1003])), \"driver_id values should match expected\"\n", + "assert all(entity_df[\"customer_id\"].isin([2001, 2002, 2003])), \"customer_id values should match expected\"\n", + "assert entity_df[\"event_timestamp\"].notna().all(), \"All event_timestamp values should be non-null\"\n", + "\n", + "print(f\"āœ“ Created entity DataFrame with {len(entity_df)} rows\")\n", + "print(f\"āœ“ Time range: {start_date} to {end_date}\")\n", + "print(\"\\nEntity DataFrame:\")\n", + "print(entity_df)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. 
Retrieve Historical Features\n", + "\n", + "Retrieve historical features using Ray compute engine for distributed point-in-time joins.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cell 4: Retrieve Historical Features\n", + "print(\"Retrieving historical features with Ray compute engine...\")\n", + "print(\"(This demonstrates distributed point-in-time joins)\")\n", + "\n", + "try:\n", + " # Get historical features - this uses Ray compute engine for distributed processing\n", + " historical_features = store.get_historical_features(\n", + " entity_df=entity_df,\n", + " features=[\n", + " \"driver_hourly_stats:conv_rate\",\n", + " \"driver_hourly_stats:acc_rate\",\n", + " \"driver_hourly_stats:avg_daily_trips\",\n", + " \"customer_daily_profile:current_balance\",\n", + " \"customer_daily_profile:avg_passenger_count\",\n", + " \"customer_daily_profile:lifetime_trip_count\",\n", + " ],\n", + " )\n", + "\n", + " # Convert to DataFrame - Ray processes this efficiently\n", + " historical_df = historical_features.to_df()\n", + " \n", + " # Assertions: Verify historical features are retrieved correctly\n", + " assert historical_df is not None, \"Historical features DataFrame should not be None\"\n", + " assert len(historical_df) > 0, \"Should retrieve at least one row of historical features\"\n", + " assert \"driver_id\" in historical_df.columns, \"driver_id should be in the result\"\n", + " assert \"customer_id\" in historical_df.columns, \"customer_id should be in the result\"\n", + " \n", + " # Verify expected feature columns are present (some may be None if data doesn't exist)\n", + " expected_features = [\n", + " \"conv_rate\", \"acc_rate\", \"avg_daily_trips\",\n", + " \"current_balance\", \"avg_passenger_count\", \"lifetime_trip_count\"\n", + " ]\n", + " feature_columns = [col for col in historical_df.columns if col in expected_features]\n", + " assert len(feature_columns) > 0, f\"Should have at least one feature column, got: {historical_df.columns.tolist()}\"\n", + " \n", + " print(f\"āœ“ Retrieved {len(historical_df)} historical feature rows\")\n", + " print(f\"āœ“ Features: {list(historical_df.columns)}\")\n", + " \n", + " # Display the results\n", + " print(\"\\nHistorical Features DataFrame:\")\n", + " display(historical_df.head(10))\n", + "\n", + "except Exception as e:\n", + " print(f\"⚠ Historical features retrieval failed: {e}\")\n", + " print(\"This might be due to missing Ray dependencies or data\")\n", + " raise\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. 
Test On-Demand Feature Transformations\n", + "\n", + "Demonstrate on-demand feature transformations that are computed at request time.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cell 5: Test On-Demand Features\n", + "print(\"Testing on-demand feature transformations...\")\n", + "\n", + "try:\n", + " # Get features including on-demand transformations\n", + " features_with_odfv = store.get_historical_features(\n", + " entity_df=entity_df.head(1),\n", + " features=[\n", + " \"driver_hourly_stats:conv_rate\",\n", + " \"driver_hourly_stats:acc_rate\",\n", + " \"driver_hourly_stats:avg_daily_trips\",\n", + " \"driver_activity_v2:conv_rate_plus_acc_rate\",\n", + " \"driver_activity_v2:trips_per_day_normalized\",\n", + " ],\n", + " )\n", + "\n", + " odfv_df = features_with_odfv.to_df()\n", + " \n", + " # Assertions: Verify on-demand features are computed correctly\n", + " assert odfv_df is not None, \"On-demand features DataFrame should not be None\"\n", + " assert len(odfv_df) > 0, \"Should retrieve at least one row with on-demand features\"\n", + " assert \"driver_id\" in odfv_df.columns, \"driver_id should be in the result\"\n", + " \n", + " # Verify on-demand feature columns if they exist\n", + " if \"conv_rate_plus_acc_rate\" in odfv_df.columns:\n", + " # Assertion: Verify the on-demand feature is computed\n", + " assert odfv_df[\"conv_rate_plus_acc_rate\"].notna().any(), \"conv_rate_plus_acc_rate should have non-null values\"\n", + " print(\"āœ“ On-demand feature 'conv_rate_plus_acc_rate' is computed\")\n", + " \n", + " if \"trips_per_day_normalized\" in odfv_df.columns:\n", + " assert odfv_df[\"trips_per_day_normalized\"].notna().any(), \"trips_per_day_normalized should have non-null values\"\n", + " print(\"āœ“ On-demand feature 'trips_per_day_normalized' is computed\")\n", + " \n", + " print(f\"āœ“ Retrieved {len(odfv_df)} rows with on-demand transformations\")\n", + " \n", + " # Display results\n", + " print(\"\\nFeatures with On-Demand Transformations:\")\n", + " display(odfv_df)\n", + " \n", + " # Show specific transformed features\n", + " if \"conv_rate_plus_acc_rate\" in odfv_df.columns:\n", + " print(\"\\nSample with on-demand features:\")\n", + " display(\n", + " odfv_df[[\"driver_id\", \"conv_rate\", \"acc_rate\", \"conv_rate_plus_acc_rate\"]]\n", + " )\n", + "\n", + "except Exception as e:\n", + " print(f\"⚠ On-demand features failed: {e}\")\n", + " raise\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. Materialize Features to Online Store\n", + "\n", + "Materialize features to the online store using Ray compute engine for efficient batch processing.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from datetime import timezone\n", + "print(\"Materializing features to online store...\")\n", + "store.materialize(\n", + "\tstart_date=datetime(2025, 1, 1, tzinfo=timezone.utc),\n", + "\tend_date=end_date,\n", + ")\n", + "\n", + "# Minimal output assertion: materialization succeeded if no exception\n", + "assert True, \"Materialization completed successfully\"\n", + "print(\"āœ“ Initial materialization successful\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 6. 
Test Online Feature Serving\n", + "\n", + "Retrieve features from the online store for low-latency serving.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Cell 7: Test Online Feature Serving\n", + "print(\"Testing online feature serving...\")\n", + "\n", + "try:\n", + " entity_rows = [\n", + " {\"driver_id\": 1001, \"customer_id\": 2001},\n", + " {\"driver_id\": 1002, \"customer_id\": 2002},\n", + " ]\n", + " \n", + " # Assertion: Verify entity rows are valid\n", + " assert len(entity_rows) == 2, \"Should have 2 entity rows\"\n", + " assert all(\"driver_id\" in row for row in entity_rows), \"All entity rows should have driver_id\"\n", + " assert all(\"customer_id\" in row for row in entity_rows), \"All entity rows should have customer_id\"\n", + " \n", + " online_features = store.get_online_features(\n", + " features=[\n", + " \"driver_hourly_stats:conv_rate\",\n", + " \"driver_hourly_stats:acc_rate\",\n", + " \"customer_daily_profile:current_balance\",\n", + " ],\n", + " entity_rows=entity_rows,\n", + " )\n", + "\n", + " online_df = online_features.to_df()\n", + " \n", + " # Assertions: Verify online features are retrieved correctly\n", + " assert online_df is not None, \"Online features DataFrame should not be None\"\n", + " assert len(online_df) == len(entity_rows), f\"Should retrieve {len(entity_rows)} rows, got {len(online_df)}\"\n", + " assert \"driver_id\" in online_df.columns, \"driver_id should be in the result\"\n", + " assert \"customer_id\" in online_df.columns, \"customer_id should be in the result\"\n", + " \n", + " # Verify expected feature columns are present\n", + " expected_features = [\"conv_rate\", \"acc_rate\", \"current_balance\"]\n", + " feature_columns = [col for col in online_df.columns if col in expected_features]\n", + " assert len(feature_columns) > 0, f\"Should have at least one feature column, got: {online_df.columns.tolist()}\"\n", + " \n", + " # Verify entity IDs match\n", + " assert all(online_df[\"driver_id\"].isin([1001, 1002])), \"driver_id values should match entity rows\"\n", + " assert all(online_df[\"customer_id\"].isin([2001, 2002])), \"customer_id values should match entity rows\"\n", + " \n", + " print(f\"āœ“ Retrieved {len(online_df)} online feature rows\")\n", + " print(f\"āœ“ Features retrieved: {feature_columns}\")\n", + " \n", + " print(\"\\nOnline Features DataFrame:\")\n", + " display(online_df)\n", + "\n", + "except Exception as e:\n", + " print(f\"⚠ Online serving failed: {e}\")\n", + " raise\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cluster.down()" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/infra/feast-operator/test/e2e_rhoai/resources/feast_kube_auth.yaml b/infra/feast-operator/test/e2e_rhoai/resources/feast_kube_auth.yaml index 2ec348c63db..fae126b528a 100644 --- a/infra/feast-operator/test/e2e_rhoai/resources/feast_kube_auth.yaml +++ b/infra/feast-operator/test/e2e_rhoai/resources/feast_kube_auth.yaml @@ -13,7 +13,7 @@ stringData: echo: false pool_pre_ping: true --- -apiVersion: feast.dev/v1alpha1 +apiVersion: feast.dev/v1 kind: FeatureStore metadata: name: credit-scoring diff --git a/infra/feast-operator/test/e2e_rhoai/resources/kueue_resources_setup.yaml b/infra/feast-operator/test/e2e_rhoai/resources/kueue_resources_setup.yaml new file mode 100644 index 00000000000..ebcac54f4a0 --- /dev/null +++ 
b/infra/feast-operator/test/e2e_rhoai/resources/kueue_resources_setup.yaml @@ -0,0 +1,31 @@ +apiVersion: kueue.x-k8s.io/v1beta1 +kind: ResourceFlavor +metadata: + name: "fs-resource-flavor" +--- +apiVersion: kueue.x-k8s.io/v1beta1 +kind: ClusterQueue +metadata: + name: "fs-cluster-queue" +spec: + namespaceSelector: {} # match all. + resourceGroups: + - coveredResources: ["cpu", "memory","nvidia.com/gpu"] + flavors: + - name: "fs-resource-flavor" + resources: + - name: "cpu" + nominalQuota: 9 + - name: "memory" + nominalQuota: 36Gi + - name: "nvidia.com/gpu" + nominalQuota: 0 +--- +apiVersion: kueue.x-k8s.io/v1beta1 +kind: LocalQueue +metadata: + name: "fs-user-queue" + annotations: + "kueue.x-k8s.io/default-queue": "true" +spec: + clusterQueue: "fs-cluster-queue" diff --git a/infra/feast-operator/test/testdata/feast_integration_test_crs/v1_default_featurestore.yaml b/infra/feast-operator/test/testdata/feast_integration_test_crs/v1_default_featurestore.yaml new file mode 100644 index 00000000000..7c28a46145e --- /dev/null +++ b/infra/feast-operator/test/testdata/feast_integration_test_crs/v1_default_featurestore.yaml @@ -0,0 +1,14 @@ +apiVersion: feast.dev/v1 +kind: FeatureStore +metadata: + name: simple-feast-setup +spec: + feastProject: my_project + services: + offlineStore: + server: {} + registry: + local: + server: {} + ui: {} + diff --git a/infra/feast-operator/test/testdata/feast_integration_test_crs/v1_remote_registry_featurestore.yaml b/infra/feast-operator/test/testdata/feast_integration_test_crs/v1_remote_registry_featurestore.yaml new file mode 100644 index 00000000000..83b4e327829 --- /dev/null +++ b/infra/feast-operator/test/testdata/feast_integration_test_crs/v1_remote_registry_featurestore.yaml @@ -0,0 +1,16 @@ +apiVersion: feast.dev/v1 +kind: FeatureStore +metadata: + name: simple-feast-remote-setup +spec: + feastProject: my_project + services: + offlineStore: + server: {} + ui: {} + registry: + remote: + feastRef: + name: simple-feast-setup + namespace: test-ns-feast + diff --git a/infra/feast-operator/test/utils/notebook_util.go b/infra/feast-operator/test/utils/notebook_util.go index a4a3e14a2ce..cad4a734a01 100644 --- a/infra/feast-operator/test/utils/notebook_util.go +++ b/infra/feast-operator/test/utils/notebook_util.go @@ -9,6 +9,7 @@ import ( "text/template" "time" + . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" ) @@ -340,3 +341,32 @@ func SetupNotebookEnvironment(namespace, configMapName, notebookFile, featureRep return nil } + +// RunNotebookTest performs all the setup steps, creates a notebook, and monitors its execution. +// This function encapsulates the common notebook test workflow used across multiple test files. 
+func RunNotebookTest(namespace, configMapName, notebookFile, featureRepoPath, pvcFile, rolebindingName, notebookPVC, notebookName, testDir string) { + // Execute common setup steps + By(fmt.Sprintf("Setting namespace context to : %s", namespace)) + Expect(SetNamespaceContext(namespace, testDir)).To(Succeed()) + fmt.Printf("Successfully set namespace context to: %s\n", namespace) + + By(fmt.Sprintf("Creating Config map: %s", configMapName)) + Expect(CreateNotebookConfigMap(namespace, configMapName, notebookFile, featureRepoPath, testDir)).To(Succeed()) + fmt.Printf("ConfigMap %s created successfully\n", configMapName) + + By(fmt.Sprintf("Creating Persistent volume claim: %s", notebookPVC)) + Expect(CreateNotebookPVC(pvcFile, testDir)).To(Succeed()) + fmt.Printf("Persistent Volume Claim %s created successfully\n", notebookPVC) + + By(fmt.Sprintf("Creating rolebinding %s for the user", rolebindingName)) + Expect(CreateNotebookRoleBinding(namespace, rolebindingName, GetOCUser(testDir), testDir)).To(Succeed()) + fmt.Printf("Created rolebinding %s successfully\n", rolebindingName) + + // Build notebook parameters and create notebook + nbParams := GetNotebookParams(namespace, configMapName, notebookPVC, notebookName, testDir) + By("Creating Jupyter Notebook") + Expect(CreateNotebook(nbParams)).To(Succeed(), "Failed to create notebook") + + By("Monitoring notebook logs") + Expect(MonitorNotebookPod(namespace, "jupyter-nb-", notebookName)).To(Succeed(), "Notebook execution failed") +} diff --git a/infra/feast-operator/test/utils/test_util.go b/infra/feast-operator/test/utils/test_util.go index 3ec35043aef..1f476708d4c 100644 --- a/infra/feast-operator/test/utils/test_util.go +++ b/infra/feast-operator/test/utils/test_util.go @@ -16,7 +16,7 @@ import ( appsv1 "k8s.io/api/apps/v1" "github.com/feast-dev/feast/infra/feast-operator/api/feastversion" - "github.com/feast-dev/feast/infra/feast-operator/api/v1alpha1" + feastdevv1 "github.com/feast-dev/feast/infra/feast-operator/api/v1" ) const ( @@ -47,7 +47,7 @@ func checkIfFeatureStoreCustomResourceConditionsInReady(featureStoreName, namesp } // Parse the JSON into FeatureStore - var resource v1alpha1.FeatureStore + var resource feastdevv1.FeatureStore if err := json.Unmarshal(out.Bytes(), &resource); err != nil { return fmt.Errorf("failed to parse the resource JSON. Error: %v", err) } @@ -216,7 +216,7 @@ func isFeatureStoreHavingRemoteRegistry(namespace, featureStoreName string) (boo } // Parse the JSON into a map - var registryConfig v1alpha1.Registry + var registryConfig feastdevv1.Registry if err := json.Unmarshal([]byte(result), ®istryConfig); err != nil { return false, err // Return false on JSON parsing failure } diff --git a/infra/website/src/pages/index.astro b/infra/website/src/pages/index.astro index 9c0b8ba1d9e..fd651d86a02 100644 --- a/infra/website/src/pages/index.astro +++ b/infra/website/src/pages/index.astro @@ -114,9 +114,6 @@ features = store.retrieve_online_documents(
-
- -
diff --git a/java/pom.xml b/java/pom.xml index 4f32be3bdf8..dc82528c7c7 100644 --- a/java/pom.xml +++ b/java/pom.xml @@ -35,7 +35,7 @@ - 0.57.0 + 0.58.0 https://github.com/feast-dev/feast UTF-8 diff --git a/sdk/python/feast/cli/cli.py b/sdk/python/feast/cli/cli.py index 91fa2a92606..60ea6292488 100644 --- a/sdk/python/feast/cli/cli.py +++ b/sdk/python/feast/cli/cli.py @@ -412,13 +412,18 @@ def materialize_incremental_command(ctx: click.Context, end_ts: str, views: List "milvus", "ray", "ray_rag", + "pytorch_nlp", ], case_sensitive=False, ), help="Specify a template for the created project", default="local", ) -def init_command(project_directory, minimal: bool, template: str): +@click.option( + "--repo-path", + help="Directory path where the repository will be created (default: create subdirectory with project name)", +) +def init_command(project_directory, minimal: bool, template: str, repo_path: str): """Create a new Feast repository""" if not project_directory: project_directory = generate_project_name() @@ -426,7 +431,7 @@ def init_command(project_directory, minimal: bool, template: str): if minimal: template = "minimal" - init_repo(project_directory, template) + init_repo(project_directory, template, repo_path) @cli.command("listen") diff --git a/sdk/python/feast/feature_server.py b/sdk/python/feast/feature_server.py index e3ec16496cc..fbbb38821af 100644 --- a/sdk/python/feast/feature_server.py +++ b/sdk/python/feast/feature_server.py @@ -155,6 +155,52 @@ async def _get_features( return features +async def load_static_artifacts(app: FastAPI, store): + """ + Load static artifacts (models, lookup tables, etc.) into app.state. + + This function can be extended to load various types of static artifacts: + - Small ML models (scikit-learn, small neural networks) + - Lookup tables and reference data + - Configuration parameters + - Pre-computed embeddings + + Note: Not recommended for large language models - use dedicated + model serving solutions (vLLM, TGI, etc.) for those. 
+ """ + try: + # Import here to avoid loading heavy dependencies unless needed + import importlib.util + import inspect + from pathlib import Path + + # Look for static artifacts loading in the feature repository + # This allows templates and users to define their own artifact loading + repo_path = Path(store.repo_path) if store.repo_path else Path.cwd() + artifacts_file = repo_path / "static_artifacts.py" + + if artifacts_file.exists(): + # Load and execute custom static artifacts loading + spec = importlib.util.spec_from_file_location( + "static_artifacts", artifacts_file + ) + if spec and spec.loader: + artifacts_module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(artifacts_module) + + # Look for load_artifacts function + if hasattr(artifacts_module, "load_artifacts"): + load_func = artifacts_module.load_artifacts + if inspect.iscoroutinefunction(load_func): + await load_func(app) + else: + load_func(app) + logger.info("Loaded static artifacts from static_artifacts.py") + except Exception as e: + # Non-fatal error - feature server should still start + logger.warning(f"Failed to load static artifacts: {e}") + + def get_app( store: "feast.FeatureStore", registry_ttl_sec: int = DEFAULT_FEATURE_SERVER_REGISTRY_TTL, @@ -217,6 +263,9 @@ def async_refresh(): @asynccontextmanager async def lifespan(app: FastAPI): + # Load static artifacts before initializing store + await load_static_artifacts(app, store) + await store.initialize() async_refresh() yield diff --git a/sdk/python/feast/infra/feature_servers/multicloud/requirements.txt b/sdk/python/feast/infra/feature_servers/multicloud/requirements.txt index 4f144773215..a169a0b19ca 100644 --- a/sdk/python/feast/infra/feature_servers/multicloud/requirements.txt +++ b/sdk/python/feast/infra/feature_servers/multicloud/requirements.txt @@ -1,2 +1,2 @@ # keep VERSION on line #2, this is critical to release CI -feast[minimal] == 0.57.0 +feast[minimal] == 0.58.0 diff --git a/sdk/python/feast/infra/offline_stores/contrib/clickhouse_offline_store/tests/data_source.py b/sdk/python/feast/infra/offline_stores/contrib/clickhouse_offline_store/tests/data_source.py index 80fd1751dc5..4234c46eb3f 100644 --- a/sdk/python/feast/infra/offline_stores/contrib/clickhouse_offline_store/tests/data_source.py +++ b/sdk/python/feast/infra/offline_stores/contrib/clickhouse_offline_store/tests/data_source.py @@ -15,6 +15,8 @@ from feast.infra.offline_stores.contrib.clickhouse_offline_store.clickhouse_source import ( ClickhouseSource, ) +from feast.infra.utils.clickhouse.clickhouse_config import ClickhouseConfig +from feast.infra.utils.clickhouse.connection_utils import get_client from tests.integration.feature_repos.universal.data_source_creator import ( DataSourceCreator, ) @@ -114,3 +116,29 @@ def create_saved_dataset_destination(self): def teardown(self): pass + + +def test_get_client_with_additional_params(clickhouse_container): + """ + Test that get_client works with a real ClickHouse container and properly passes + additional settings like send_receive_timeout. 
+ """ + # Create config with custom send_receive_timeout + config = ClickhouseConfig( + host=clickhouse_container.get_container_host_ip(), + port=clickhouse_container.get_exposed_port(8123), + user=CLICKHOUSE_USER, + password=CLICKHOUSE_PASSWORD, + database=CLICKHOUSE_OFFLINE_DB, + additional_client_args={"send_receive_timeout": 60}, + ) + + # Get client and verify it works + client = get_client(config) + + # Verify client is connected and functional by running a simple query + result = client.query("SELECT 1 AS test_value") + assert result.result_rows == [(1,)] + + # Verify the send_receive_timeout was applied + assert client.timeout._read == 60 diff --git a/sdk/python/feast/infra/utils/clickhouse/clickhouse_config.py b/sdk/python/feast/infra/utils/clickhouse/clickhouse_config.py index 1f163e0a81b..75167f8a60e 100644 --- a/sdk/python/feast/infra/utils/clickhouse/clickhouse_config.py +++ b/sdk/python/feast/infra/utils/clickhouse/clickhouse_config.py @@ -1,3 +1,5 @@ +from typing import Any + from pydantic import ConfigDict, StrictStr from feast.repo_config import FeastConfigBaseModel @@ -11,4 +13,8 @@ class ClickhouseConfig(FeastConfigBaseModel): password: StrictStr use_temporary_tables_for_entity_df: bool = True + # See https://github.com/ClickHouse/clickhouse-connect/blob/main/clickhouse_connect/driver/__init__.py#L51 + # Some typical ones e.g. send_receive_timeout (read_timeout), etc + additional_client_args: dict[str, Any] | None = None + model_config = ConfigDict(frozen=True) diff --git a/sdk/python/feast/infra/utils/clickhouse/connection_utils.py b/sdk/python/feast/infra/utils/clickhouse/connection_utils.py index 88f5334db14..6d5f1b87052 100644 --- a/sdk/python/feast/infra/utils/clickhouse/connection_utils.py +++ b/sdk/python/feast/infra/utils/clickhouse/connection_utils.py @@ -11,12 +11,24 @@ def get_client(config: ClickhouseConfig) -> Client: # Clickhouse client is not thread-safe, so we need to create a separate instance for each thread. 
if not hasattr(thread_local, "clickhouse_client"): - thread_local.clickhouse_client = clickhouse_connect.get_client( - host=config.host, - port=config.port, - user=config.user, - password=config.password, - database=config.database, - ) + additional_client_args = config.additional_client_args + + if additional_client_args: + thread_local.clickhouse_client = clickhouse_connect.get_client( + host=config.host, + port=config.port, + user=config.user, + password=config.password, + database=config.database, + **additional_client_args, + ) + else: + thread_local.clickhouse_client = clickhouse_connect.get_client( + host=config.host, + port=config.port, + user=config.user, + password=config.password, + database=config.database, + ) return thread_local.clickhouse_client diff --git a/sdk/python/feast/repo_operations.py b/sdk/python/feast/repo_operations.py index 8eae581a260..9bc11e625f5 100644 --- a/sdk/python/feast/repo_operations.py +++ b/sdk/python/feast/repo_operations.py @@ -445,27 +445,37 @@ def cli_check_repo(repo_path: Path, fs_yaml_file: Path): sys.exit(1) -def init_repo(repo_name: str, template: str): +def init_repo(repo_name: str, template: str, repo_path: Optional[str] = None): import os from pathlib import Path from shutil import copytree from colorama import Fore, Style + # Validate project name if not is_valid_name(repo_name): raise BadParameter( message="Name should be alphanumeric values, underscores, and hyphens but not start with an underscore or hyphen", param_hint="PROJECT_DIRECTORY", ) - repo_path = Path(os.path.join(Path.cwd(), repo_name)) - repo_path.mkdir(exist_ok=True) - repo_config_path = repo_path / "feature_store.yaml" - if repo_config_path.exists(): - new_directory = os.path.relpath(repo_path, os.getcwd()) + # Determine where to create the repository + if repo_path: + # User specified a custom path + target_path = Path(repo_path).resolve() + target_path.mkdir(parents=True, exist_ok=True) + display_path = repo_path + else: + # Default behavior: create subdirectory with project name + target_path = Path(os.path.join(Path.cwd(), repo_name)) + target_path.mkdir(exist_ok=True) + display_path = repo_name + repo_config_path = target_path / "feature_store.yaml" + + if repo_config_path.exists(): print( - f"The directory {Style.BRIGHT + Fore.GREEN}{new_directory}{Style.RESET_ALL} contains an existing feature " + f"The directory {Style.BRIGHT + Fore.GREEN}{display_path}{Style.RESET_ALL} contains an existing feature " f"store repository that may cause a conflict" ) print() @@ -475,14 +485,14 @@ def init_repo(repo_name: str, template: str): template_path = str(Path(Path(__file__).parent / "templates" / template).absolute()) if not os.path.exists(template_path): raise IOError(f"Could not find template {template}") - copytree(template_path, str(repo_path), dirs_exist_ok=True) + copytree(template_path, str(target_path), dirs_exist_ok=True) # Rename gitignore files back to .gitignore - for gitignore_path in repo_path.rglob("gitignore"): + for gitignore_path in target_path.rglob("gitignore"): gitignore_path.rename(gitignore_path.with_name(".gitignore")) # Seed the repository - bootstrap_path = repo_path / "bootstrap.py" + bootstrap_path = target_path / "bootstrap.py" if os.path.exists(bootstrap_path): import importlib.util @@ -495,7 +505,7 @@ def init_repo(repo_name: str, template: str): os.remove(bootstrap_path) # Template the feature_store.yaml file - feature_store_yaml_path = repo_path / "feature_repo" / "feature_store.yaml" + feature_store_yaml_path = target_path / "feature_repo" / 
"feature_store.yaml" replace_str_in_file( feature_store_yaml_path, "project: my_project", f"project: {repo_name}" ) @@ -503,13 +513,13 @@ def init_repo(repo_name: str, template: str): # Remove the __pycache__ folder if it exists import shutil - shutil.rmtree(repo_path / "__pycache__", ignore_errors=True) + shutil.rmtree(target_path / "__pycache__", ignore_errors=True) import click click.echo() click.echo( - f"Creating a new Feast repository in {Style.BRIGHT + Fore.GREEN}{repo_path}{Style.RESET_ALL}." + f"Creating a new Feast repository in {Style.BRIGHT + Fore.GREEN}{target_path}{Style.RESET_ALL}." ) click.echo() diff --git a/sdk/python/feast/templates/aws/README.md b/sdk/python/feast/templates/aws/README.md index 008a338e984..10ae4887551 100644 --- a/sdk/python/feast/templates/aws/README.md +++ b/sdk/python/feast/templates/aws/README.md @@ -2,7 +2,7 @@ A quick view of what's in this repository: * `data/` contains raw demo parquet data -* `example_repo.py` contains demo feature definitions +* `feature_definitions.py` contains demo feature definitions * `feature_store.yaml` contains a demo setup configuring where data sources are * `test_workflow.py` showcases how to run all key Feast commands, including defining, retrieving, and pushing features. diff --git a/sdk/python/feast/templates/aws/bootstrap.py b/sdk/python/feast/templates/aws/bootstrap.py index 63e5b50203b..9ec3d322b19 100644 --- a/sdk/python/feast/templates/aws/bootstrap.py +++ b/sdk/python/feast/templates/aws/bootstrap.py @@ -55,7 +55,7 @@ def bootstrap(): ) repo_path = pathlib.Path(__file__).parent.absolute() / "feature_repo" - example_py_file = repo_path / "example_repo.py" + example_py_file = repo_path / "feature_definitions.py" replace_str_in_file(example_py_file, "%REDSHIFT_DATABASE%", database) config_file = repo_path / "feature_store.yaml" diff --git a/sdk/python/feast/templates/aws/feature_repo/example_repo.py b/sdk/python/feast/templates/aws/feature_repo/feature_definitions.py similarity index 100% rename from sdk/python/feast/templates/aws/feature_repo/example_repo.py rename to sdk/python/feast/templates/aws/feature_repo/feature_definitions.py diff --git a/sdk/python/feast/templates/cassandra/bootstrap.py b/sdk/python/feast/templates/cassandra/bootstrap.py index 16c82316258..cfd1fddacf2 100644 --- a/sdk/python/feast/templates/cassandra/bootstrap.py +++ b/sdk/python/feast/templates/cassandra/bootstrap.py @@ -273,8 +273,8 @@ def bootstrap(): driver_stats_path = data_path / "driver_stats.parquet" driver_df.to_parquet(path=str(driver_stats_path), allow_truncated_timestamps=True) - # example_repo.py - example_py_file = repo_path / "example_repo.py" + # feature_definitions.py + example_py_file = repo_path / "feature_definitions.py" replace_str_in_file( example_py_file, "%PARQUET_PATH%", str(driver_stats_path.relative_to(repo_path)) ) diff --git a/sdk/python/feast/templates/cassandra/feature_repo/example_repo.py b/sdk/python/feast/templates/cassandra/feature_repo/feature_definitions.py similarity index 100% rename from sdk/python/feast/templates/cassandra/feature_repo/example_repo.py rename to sdk/python/feast/templates/cassandra/feature_repo/feature_definitions.py diff --git a/sdk/python/feast/templates/couchbase/feature_repo/example_repo.py b/sdk/python/feast/templates/couchbase/feature_repo/feature_definitions.py similarity index 100% rename from sdk/python/feast/templates/couchbase/feature_repo/example_repo.py rename to sdk/python/feast/templates/couchbase/feature_repo/feature_definitions.py diff --git 
a/sdk/python/feast/templates/gcp/README.md b/sdk/python/feast/templates/gcp/README.md index bc9e51769c9..7f28fdbfa2f 100644 --- a/sdk/python/feast/templates/gcp/README.md +++ b/sdk/python/feast/templates/gcp/README.md @@ -2,7 +2,7 @@ A quick view of what's in this repository: * `data/` contains raw demo parquet data -* `example_repo.py` contains demo feature definitions +* `feature_definitions.py` contains demo feature definitions * `feature_store.yaml` contains a demo setup configuring where data sources are * `test_workflow.py` showcases how to run all key Feast commands, including defining, retrieving, and pushing features. diff --git a/sdk/python/feast/templates/gcp/feature_repo/example_repo.py b/sdk/python/feast/templates/gcp/feature_repo/feature_definitions.py similarity index 100% rename from sdk/python/feast/templates/gcp/feature_repo/example_repo.py rename to sdk/python/feast/templates/gcp/feature_repo/feature_definitions.py diff --git a/sdk/python/feast/templates/hazelcast/bootstrap.py b/sdk/python/feast/templates/hazelcast/bootstrap.py index 7a2b49d2493..9db778c2bd0 100644 --- a/sdk/python/feast/templates/hazelcast/bootstrap.py +++ b/sdk/python/feast/templates/hazelcast/bootstrap.py @@ -163,8 +163,8 @@ def bootstrap(): driver_stats_path = data_path / "driver_stats.parquet" driver_df.to_parquet(path=str(driver_stats_path), allow_truncated_timestamps=True) - # example_repo.py - example_py_file = repo_path / "example_repo.py" + # feature_definitions.py + example_py_file = repo_path / "feature_definitions.py" replace_str_in_file( example_py_file, "%PARQUET_PATH%", str(driver_stats_path.relative_to(repo_path)) ) diff --git a/sdk/python/feast/templates/hazelcast/feature_repo/example_repo.py b/sdk/python/feast/templates/hazelcast/feature_repo/feature_definitions.py similarity index 100% rename from sdk/python/feast/templates/hazelcast/feature_repo/example_repo.py rename to sdk/python/feast/templates/hazelcast/feature_repo/feature_definitions.py diff --git a/sdk/python/feast/templates/hbase/bootstrap.py b/sdk/python/feast/templates/hbase/bootstrap.py index 94be8e441da..e8f59ee2caf 100644 --- a/sdk/python/feast/templates/hbase/bootstrap.py +++ b/sdk/python/feast/templates/hbase/bootstrap.py @@ -22,7 +22,7 @@ def bootstrap(): driver_stats_path = data_path / "driver_stats.parquet" driver_df.to_parquet(path=str(driver_stats_path), allow_truncated_timestamps=True) - example_py_file = repo_path / "example_repo.py" + example_py_file = repo_path / "feature_definitions.py" replace_str_in_file( example_py_file, "%PARQUET_PATH%", str(driver_stats_path.relative_to(repo_path)) ) diff --git a/sdk/python/feast/templates/hbase/feature_repo/example_repo.py b/sdk/python/feast/templates/hbase/feature_repo/feature_definitions.py similarity index 100% rename from sdk/python/feast/templates/hbase/feature_repo/example_repo.py rename to sdk/python/feast/templates/hbase/feature_repo/feature_definitions.py diff --git a/sdk/python/feast/templates/local/README.md b/sdk/python/feast/templates/local/README.md index 1e617cc442f..0f223bc9850 100644 --- a/sdk/python/feast/templates/local/README.md +++ b/sdk/python/feast/templates/local/README.md @@ -3,7 +3,7 @@ If you haven't already, check out the quickstart guide on Feast's website (http: uses this repo. 
A quick view of what's in this repository's `feature_repo/` directory: * `data/` contains raw demo parquet data -* `feature_repo/example_repo.py` contains demo feature definitions +* `feature_repo/feature_definitions.py` contains demo feature definitions * `feature_repo/feature_store.yaml` contains a demo setup configuring where data sources are * `feature_repo/test_workflow.py` showcases how to run all key Feast commands, including defining, retrieving, and pushing features. diff --git a/sdk/python/feast/templates/local/bootstrap.py b/sdk/python/feast/templates/local/bootstrap.py index 9f6a5a6c969..bd180ade01e 100644 --- a/sdk/python/feast/templates/local/bootstrap.py +++ b/sdk/python/feast/templates/local/bootstrap.py @@ -23,7 +23,7 @@ def bootstrap(): driver_stats_path = data_path / "driver_stats.parquet" driver_df.to_parquet(path=str(driver_stats_path), allow_truncated_timestamps=True) - example_py_file = repo_path / "example_repo.py" + example_py_file = repo_path / "feature_definitions.py" replace_str_in_file(example_py_file, "%PROJECT_NAME%", str(project_name)) replace_str_in_file( example_py_file, "%PARQUET_PATH%", str(driver_stats_path.relative_to(repo_path)) diff --git a/sdk/python/feast/templates/local/feature_repo/example_repo.py b/sdk/python/feast/templates/local/feature_repo/feature_definitions.py similarity index 100% rename from sdk/python/feast/templates/local/feature_repo/example_repo.py rename to sdk/python/feast/templates/local/feature_repo/feature_definitions.py diff --git a/sdk/python/feast/templates/milvus/README.md b/sdk/python/feast/templates/milvus/README.md index 1e617cc442f..0f223bc9850 100644 --- a/sdk/python/feast/templates/milvus/README.md +++ b/sdk/python/feast/templates/milvus/README.md @@ -3,7 +3,7 @@ If you haven't already, check out the quickstart guide on Feast's website (http: uses this repo. A quick view of what's in this repository's `feature_repo/` directory: * `data/` contains raw demo parquet data -* `feature_repo/example_repo.py` contains demo feature definitions +* `feature_repo/feature_definitions.py` contains demo feature definitions * `feature_repo/feature_store.yaml` contains a demo setup configuring where data sources are * `feature_repo/test_workflow.py` showcases how to run all key Feast commands, including defining, retrieving, and pushing features. 
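For context on the renames above: every template's `bootstrap.py` copies the template files and then substitutes placeholders in the demo definitions file, which is now named `feature_definitions.py` instead of `example_repo.py`. Below is a minimal sketch of that substitution step, with `replace_str_in_file` re-implemented here as a stand-in for the helper the bootstrap scripts actually import, and `seed_feature_definitions` a hypothetical wrapper added only for illustration.

```python
from pathlib import Path


def replace_str_in_file(file_path: Path, match_str: str, sub_str: str) -> None:
    # Minimal stand-in for the helper the template bootstrap scripts import:
    # read the file, substitute every occurrence, and write it back.
    contents = file_path.read_text()
    file_path.write_text(contents.replace(match_str, sub_str))


def seed_feature_definitions(repo_path: Path, project_name: str, parquet_path: Path) -> None:
    # Hypothetical wrapper mirroring the bootstrap.py hunks above: the demo
    # definitions file is now feature_definitions.py (formerly example_repo.py).
    example_py_file = repo_path / "feature_definitions.py"
    replace_str_in_file(example_py_file, "%PROJECT_NAME%", project_name)
    replace_str_in_file(
        example_py_file, "%PARQUET_PATH%", str(parquet_path.relative_to(repo_path))
    )
```

This is only a sketch under the assumptions stated above; the actual bootstrap scripts in the diff perform the same two substitutions against the renamed file.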
diff --git a/sdk/python/feast/templates/milvus/bootstrap.py b/sdk/python/feast/templates/milvus/bootstrap.py index 9f6a5a6c969..bd180ade01e 100644 --- a/sdk/python/feast/templates/milvus/bootstrap.py +++ b/sdk/python/feast/templates/milvus/bootstrap.py @@ -23,7 +23,7 @@ def bootstrap(): driver_stats_path = data_path / "driver_stats.parquet" driver_df.to_parquet(path=str(driver_stats_path), allow_truncated_timestamps=True) - example_py_file = repo_path / "example_repo.py" + example_py_file = repo_path / "feature_definitions.py" replace_str_in_file(example_py_file, "%PROJECT_NAME%", str(project_name)) replace_str_in_file( example_py_file, "%PARQUET_PATH%", str(driver_stats_path.relative_to(repo_path)) diff --git a/sdk/python/feast/templates/milvus/feature_repo/__init__.py b/sdk/python/feast/templates/milvus/feature_repo/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/sdk/python/feast/templates/milvus/feature_repo/feature_definitions.py b/sdk/python/feast/templates/milvus/feature_repo/feature_definitions.py new file mode 100644 index 00000000000..e2fd0a891cf --- /dev/null +++ b/sdk/python/feast/templates/milvus/feature_repo/feature_definitions.py @@ -0,0 +1,148 @@ +# This is an example feature definition file + +from datetime import timedelta + +import pandas as pd + +from feast import ( + Entity, + FeatureService, + FeatureView, + Field, + FileSource, + Project, + PushSource, + RequestSource, +) +from feast.feature_logging import LoggingConfig +from feast.infra.offline_stores.file_source import FileLoggingDestination +from feast.on_demand_feature_view import on_demand_feature_view +from feast.types import Float32, Float64, Int64 + +# Define a project for the feature repo +project = Project(name="%PROJECT_NAME%", description="A project for driver statistics") + +# Define an entity for the driver. You can think of an entity as a primary key used to +# fetch features. +driver = Entity(name="driver", join_keys=["driver_id"]) + +# Read data from parquet files. Parquet is convenient for local development mode. For +# production, you can use your favorite DWH, such as BigQuery. See Feast documentation +# for more info. +driver_stats_source = FileSource( + name="driver_hourly_stats_source", + path="%PARQUET_PATH%", + timestamp_field="event_timestamp", + created_timestamp_column="created", +) + +# Our parquet files contain sample data that includes a driver_id column, timestamps and +# three feature column. Here we define a Feature View that will allow us to serve this +# data to our model online. +driver_stats_fv = FeatureView( + # The unique name of this feature view. Two feature views in a single + # project cannot have the same name + name="driver_hourly_stats", + entities=[driver], + ttl=timedelta(days=1), + # The list of features defined below act as a schema to both define features + # for both materialization of features into a store, and are used as references + # during retrieval for building a training dataset or serving features + schema=[ + Field(name="conv_rate", dtype=Float32), + Field(name="acc_rate", dtype=Float32), + Field(name="avg_daily_trips", dtype=Int64, description="Average daily trips"), + ], + online=True, + source=driver_stats_source, + # Tags are user defined key/value pairs that are attached to each + # feature view + tags={"team": "driver_performance"}, +) + +# Define a request data source which encodes features / information only +# available at request time (e.g. 
part of the user initiated HTTP request) +input_request = RequestSource( + name="vals_to_add", + schema=[ + Field(name="val_to_add", dtype=Int64), + Field(name="val_to_add_2", dtype=Int64), + ], +) + + +# Define an on demand feature view which can generate new features based on +# existing feature views and RequestSource features +@on_demand_feature_view( + sources=[driver_stats_fv, input_request], + schema=[ + Field(name="conv_rate_plus_val1", dtype=Float64), + Field(name="conv_rate_plus_val2", dtype=Float64), + ], +) +def transformed_conv_rate(inputs: pd.DataFrame) -> pd.DataFrame: + df = pd.DataFrame() + df["conv_rate_plus_val1"] = inputs["conv_rate"] + inputs["val_to_add"] + df["conv_rate_plus_val2"] = inputs["conv_rate"] + inputs["val_to_add_2"] + return df + + +# This groups features into a model version +driver_activity_v1 = FeatureService( + name="driver_activity_v1", + features=[ + driver_stats_fv[["conv_rate"]], # Sub-selects a feature from a feature view + transformed_conv_rate, # Selects all features from the feature view + ], + logging_config=LoggingConfig( + destination=FileLoggingDestination(path="%LOGGING_PATH%") + ), +) +driver_activity_v2 = FeatureService( + name="driver_activity_v2", features=[driver_stats_fv, transformed_conv_rate] +) + +# Defines a way to push data (to be available offline, online or both) into Feast. +driver_stats_push_source = PushSource( + name="driver_stats_push_source", + batch_source=driver_stats_source, +) + +# Defines a slightly modified version of the feature view from above, where the source +# has been changed to the push source. This allows fresh features to be directly pushed +# to the online store for this feature view. +driver_stats_fresh_fv = FeatureView( + name="driver_hourly_stats_fresh", + entities=[driver], + ttl=timedelta(days=1), + schema=[ + Field(name="conv_rate", dtype=Float32), + Field(name="acc_rate", dtype=Float32), + Field(name="avg_daily_trips", dtype=Int64), + ], + online=True, + source=driver_stats_push_source, # Changed from above + tags={"team": "driver_performance"}, +) + + +# Define an on demand feature view which can generate new features based on +# existing feature views and RequestSource features +@on_demand_feature_view( + sources=[driver_stats_fresh_fv, input_request], # relies on fresh version of FV + schema=[ + Field(name="conv_rate_plus_val1", dtype=Float64), + Field(name="conv_rate_plus_val2", dtype=Float64), + ], +) +def transformed_conv_rate_fresh(inputs: pd.DataFrame) -> pd.DataFrame: + df = pd.DataFrame() + df["conv_rate_plus_val1"] = inputs["conv_rate"] + inputs["val_to_add"] + df["conv_rate_plus_val2"] = inputs["conv_rate"] + inputs["val_to_add_2"] + return df + + +driver_activity_v3 = FeatureService( + name="driver_activity_v3", + features=[driver_stats_fresh_fv, transformed_conv_rate_fresh], +) diff --git a/sdk/python/feast/templates/milvus/feature_repo/feature_store.yaml b/sdk/python/feast/templates/milvus/feature_repo/feature_store.yaml new file mode 100644 index 00000000000..df884a3fddb --- /dev/null +++ b/sdk/python/feast/templates/milvus/feature_repo/feature_store.yaml @@ -0,0 +1,13 @@ +project: my_project +# By default, the registry is a file (but can be turned into a more scalable SQL-backed registry) +registry: data/registry.db +# The provider primarily specifies default offline / online stores & storing the registry in a given cloud +provider: local +online_store: + type: milvus + path: data/online_store.db + vector_enabled: true +entity_key_serialization_version: 3 +# By default, no_auth 
for authentication and authorization, other possible values kubernetes and oidc. Refer the documentation for more details. +auth: + type: no_auth \ No newline at end of file diff --git a/sdk/python/feast/templates/milvus/feature_repo/test_workflow.py b/sdk/python/feast/templates/milvus/feature_repo/test_workflow.py new file mode 100644 index 00000000000..eebeb113115 --- /dev/null +++ b/sdk/python/feast/templates/milvus/feature_repo/test_workflow.py @@ -0,0 +1,130 @@ +import subprocess +from datetime import datetime + +import pandas as pd + +from feast import FeatureStore +from feast.data_source import PushMode + + +def run_demo(): + store = FeatureStore(repo_path=".") + print("\n--- Run feast apply ---") + subprocess.run(["feast", "apply"]) + + print("\n--- Historical features for training ---") + fetch_historical_features_entity_df(store, for_batch_scoring=False) + + print("\n--- Historical features for batch scoring ---") + fetch_historical_features_entity_df(store, for_batch_scoring=True) + + print("\n--- Load features into online store ---") + store.materialize_incremental(end_date=datetime.now()) + + print("\n--- Online features ---") + fetch_online_features(store) + + print("\n--- Online features retrieved (instead) through a feature service---") + fetch_online_features(store, source="feature_service") + + print( + "\n--- Online features retrieved (using feature service v3, which uses a feature view with a push source---" + ) + fetch_online_features(store, source="push") + + print("\n--- Simulate a stream event ingestion of the hourly stats df ---") + event_df = pd.DataFrame.from_dict( + { + "driver_id": [1001], + "event_timestamp": [ + datetime.now(), + ], + "created": [ + datetime.now(), + ], + "conv_rate": [1.0], + "acc_rate": [1.0], + "avg_daily_trips": [1000], + } + ) + print(event_df) + store.push("driver_stats_push_source", event_df, to=PushMode.ONLINE_AND_OFFLINE) + + print("\n--- Online features again with updated values from a stream push---") + fetch_online_features(store, source="push") + + print("\n--- Run feast teardown ---") + subprocess.run(["feast", "teardown"]) + + +def fetch_historical_features_entity_df(store: FeatureStore, for_batch_scoring: bool): + # Note: see https://docs.feast.dev/getting-started/concepts/feature-retrieval for more details on how to retrieve + # for all entities in the offline store instead + entity_df = pd.DataFrame.from_dict( + { + # entity's join key -> entity values + "driver_id": [1001, 1002, 1003], + # "event_timestamp" (reserved key) -> timestamps + "event_timestamp": [ + datetime(2021, 4, 12, 10, 59, 42), + datetime(2021, 4, 12, 8, 12, 10), + datetime(2021, 4, 12, 16, 40, 26), + ], + # (optional) label name -> label values. 
Feast does not process these + "label_driver_reported_satisfaction": [1, 5, 3], + # values we're using for an on-demand transformation + "val_to_add": [1, 2, 3], + "val_to_add_2": [10, 20, 30], + } + ) + # For batch scoring, we want the latest timestamps + if for_batch_scoring: + entity_df["event_timestamp"] = pd.to_datetime("now", utc=True) + + training_df = store.get_historical_features( + entity_df=entity_df, + features=[ + "driver_hourly_stats:conv_rate", + "driver_hourly_stats:acc_rate", + "driver_hourly_stats:avg_daily_trips", + "transformed_conv_rate:conv_rate_plus_val1", + "transformed_conv_rate:conv_rate_plus_val2", + ], + ).to_df() + print(training_df.head()) + + +def fetch_online_features(store, source: str = ""): + entity_rows = [ + # {join_key: entity_value} + { + "driver_id": 1001, + "val_to_add": 1000, + "val_to_add_2": 2000, + }, + { + "driver_id": 1002, + "val_to_add": 1001, + "val_to_add_2": 2002, + }, + ] + if source == "feature_service": + features_to_fetch = store.get_feature_service("driver_activity_v1") + elif source == "push": + features_to_fetch = store.get_feature_service("driver_activity_v3") + else: + features_to_fetch = [ + "driver_hourly_stats:acc_rate", + "transformed_conv_rate:conv_rate_plus_val1", + "transformed_conv_rate:conv_rate_plus_val2", + ] + returned_features = store.get_online_features( + features=features_to_fetch, + entity_rows=entity_rows, + ).to_dict() + for key, value in sorted(returned_features.items()): + print(key, " : ", value) + + +if __name__ == "__main__": + run_demo() diff --git a/sdk/python/feast/templates/postgres/feature_repo/example_repo.py b/sdk/python/feast/templates/postgres/feature_repo/feature_definitions.py similarity index 100% rename from sdk/python/feast/templates/postgres/feature_repo/example_repo.py rename to sdk/python/feast/templates/postgres/feature_repo/feature_definitions.py diff --git a/sdk/python/feast/templates/postgres/feature_repo/feature_store.yaml b/sdk/python/feast/templates/postgres/feature_repo/feature_store.yaml index 0663ff0ad97..a2fc2f51b61 100644 --- a/sdk/python/feast/templates/postgres/feature_repo/feature_store.yaml +++ b/sdk/python/feast/templates/postgres/feature_repo/feature_store.yaml @@ -2,7 +2,7 @@ project: my_project provider: local registry: registry_type: sql - path: postgresql://postgres:mysecretpassword@127.0.0.1:55001/feast + path: postgresql://DB_USERNAME:DB_PASSWORD@DB_HOST:DB_PORT/DB_NAME cache_ttl_seconds: 60 sqlalchemy_config_kwargs: echo: false diff --git a/sdk/python/feast/templates/pytorch_nlp/README.md b/sdk/python/feast/templates/pytorch_nlp/README.md new file mode 100644 index 00000000000..7fe0e0e708d --- /dev/null +++ b/sdk/python/feast/templates/pytorch_nlp/README.md @@ -0,0 +1,611 @@ +# PyTorch NLP Sentiment Analysis with Feast + +This template demonstrates how to build a complete sentiment analysis pipeline using **Feast** (Feature Store) with **PyTorch** and **Hugging Face Transformers**. It showcases modern MLOps practices for NLP including feature engineering, model serving, and real-time inference. 
+ +## šŸŽÆ What You'll Learn + +- **Feast Fundamentals**: Feature stores, entities, feature views, and services +- **NLP Feature Engineering**: Text preprocessing and feature extraction patterns +- **PyTorch Integration**: Using pre-trained Hugging Face models with Feast +- **Real-time Serving**: Online feature serving for production inference +- **MLOps Patterns**: Model versioning, performance monitoring, and data governance + +## šŸš€ Quick Start + +### Prerequisites + +- Python 3.8+ +- pip or conda for package management + +### 1. Initialize the Project + +```bash +feast init my-sentiment-project -t pytorch_nlp +cd my-sentiment-project +``` + +### 2. Install Dependencies + +```bash +# Install Feast with NLP support (includes PyTorch, transformers, and ML utilities) +pip install feast[nlp] +``` + +### 3. Apply and Materialize Features + +```bash +cd feature_repo +feast apply +feast materialize-incremental $(date -u +"%Y-%m-%dT%H:%M:%S") +``` + +### 4. Start Feature Server + +```bash +feast serve --host 0.0.0.0 --port 6566 +``` + +### 5. Test with Python (Optional) + +```bash +python test_workflow.py +``` + +## šŸ“Š What's Included + +### Sample Dataset +- **1000 synthetic text samples** with sentiment labels (positive/negative/neutral) +- **Engineered features**: text length, word count, emoji count, etc. +- **User context**: aggregated user statistics and behavior patterns +- **Dynamic timestamps** generated within the past 30 days for realistic demo experience + +### Feature Engineering Pipeline +- **Text Features**: Content, metadata, and linguistic characteristics +- **User Features**: Historical sentiment patterns and engagement metrics +- **Real-time Features**: On-demand sentiment prediction using pre-trained models + +### Model Integration +- **Pre-trained Models**: CardiffNLP Twitter-RoBERTa for sentiment analysis +- **Embedding Generation**: Text vectorization for similarity and clustering +- **Confidence Scoring**: Prediction confidence and probability distributions + +## 🌐 HTTP Feature Server + +Once you've started the feature server with `feast serve`, you can query features via HTTP API: + +### Basic Materialized Features + +Query stored text and user features: + +```bash +curl -X POST \ + "http://localhost:6566/get-online-features" \ + -H "Content-Type: application/json" \ + -d '{ + "features": [ + "text_features:text_content", + "text_features:sentiment_label", + "user_stats:user_avg_sentiment" + ], + "entities": { + "text_id": ["text_0000", "text_0001"], + "user_id": ["user_080", "user_091"] + } + }' +``` + +**Example Response:** +```json +{ + "metadata": {"feature_names": ["text_id","user_id","sentiment_label","text_content","user_avg_sentiment"]}, + "results": [ + {"values": ["text_0000"], "statuses": ["PRESENT"]}, + {"values": ["user_080"], "statuses": ["PRESENT"]}, + {"values": ["positive"], "statuses": ["PRESENT"]}, + {"values": ["Having an amazing day at the beach with friends!"], "statuses": ["PRESENT"]}, + {"values": [0.905], "statuses": ["PRESENT"]} + ] +} +``` + +### On-Demand Sentiment Predictions + +Get real-time sentiment analysis: + +```bash +curl -X POST \ + "http://localhost:6566/get-online-features" \ + -H "Content-Type: application/json" \ + -d '{ + "features": [ + "sentiment_prediction:predicted_sentiment", + "sentiment_prediction:sentiment_confidence", + "sentiment_prediction:positive_prob" + ], + "entities": { + "input_text": ["I love this amazing product!", "This service is terrible"], + "model_name": 
["cardiffnlp/twitter-roberta-base-sentiment-latest", "cardiffnlp/twitter-roberta-base-sentiment-latest"] + } + }' +``` + +### Feature Service (Complete Feature Set) + +Query using predefined feature service: + +```bash +curl -X POST \ + "http://localhost:6566/get-online-features" \ + -H "Content-Type: application/json" \ + -d '{ + "feature_service": "sentiment_analysis_v2", + "entities": { + "text_id": ["text_0000"], + "user_id": ["user_080"], + "input_text": ["This is an amazing experience!"], + "model_name": ["cardiffnlp/twitter-roberta-base-sentiment-latest"] + } + }' +``` + +**Note**: Use actual entity combinations from your generated data. Run `head data/sentiment_data.parquet` to see available `text_id` and `user_id` values. + +## šŸ—ļø Project Structure + +``` +my-sentiment-project/ +ā”œā”€ā”€ README.md # This file +└── feature_repo/ + ā”œā”€ā”€ feature_store.yaml # Feast configuration + ā”œā”€ā”€ example_repo.py # Feature definitions (uses pre-loaded artifacts) + ā”œā”€ā”€ static_artifacts.py # Static artifacts loading (models, lookup tables) + ā”œā”€ā”€ test_workflow.py # Complete demo workflow + └── data/ # Generated sample data + └── sentiment_data.parquet +``` + +## šŸ”§ Key Components + +### Entities +- **`text`**: Unique identifier for text samples +- **`user`**: User who created the content + +### Feature Views +- **`text_features`**: Raw text content and engineered features +- **`user_stats`**: User-level aggregated statistics and behavior + +### On-Demand Features +- **`sentiment_prediction`**: Real-time sentiment analysis using PyTorch models +- **Features**: predicted sentiment, confidence scores, probability distributions, embeddings + +### Feature Services +- **`sentiment_analysis_v1`**: Basic sentiment features for simple models +- **`sentiment_analysis_v2`**: Advanced features with user context +- **`sentiment_training_features`**: Historical features for model training + +## āš™ļø Configuration + +This template is configured for **local development** using SQLite - no external dependencies required! + +### Current Configuration (`feature_store.yaml`) + +```yaml +project: my_project +provider: local # Local provider (no cloud) +registry: data/registry.db # SQLite registry +online_store: + type: sqlite # SQLite online store (NOT Redis) + path: data/online_store.db # Local SQLite file +offline_store: + type: file # Local file-based offline store +``` + +### Why SQLite? +- āœ… **Zero setup** - Works immediately after `feast init` +- āœ… **Self-contained** - All data in local files +- āœ… **No external services** - No Redis/cloud required +- āœ… **Perfect for demos** - Easy to share and understand + +## šŸš€ Static Artifacts Loading + +This template demonstrates **static artifacts loading** - a performance optimization that loads models, lookup tables, and other artifacts once at feature server startup instead of on each request. + +### What are Static Artifacts? + +Static artifacts are pre-loaded resources that remain constant during server operation: +- **Small ML models** (sentiment analysis, classification, small neural networks) +- **Lookup tables and mappings** (label encoders, category mappings) +- **Configuration data** (model parameters, feature mappings) +- **Pre-computed embeddings** (user embeddings, item features) + +### Performance Benefits + +**Before (Per-Request Loading):** +```python +def sentiment_prediction(inputs): + # āŒ Model loads on every request - slow! 
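+    # (re-initializing the tokenizer and model weights on every call typically
+    #  adds seconds of latency and repeats work that never changes between requests)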
+ model = pipeline("sentiment-analysis", model="...") + return model(inputs["text"]) +``` + +**After (Startup Loading):** +```python +# āœ… Model loads once at server startup +def sentiment_prediction(inputs): + global _sentiment_model # Pre-loaded model + return _sentiment_model(inputs["text"]) +``` + +**Performance Impact:** +- šŸš€ **10-100x faster** inference (no model loading overhead) +- šŸ’¾ **Lower memory usage** (shared model across requests) +- ⚔ **Better scalability** (consistent response times) + +### How It Works + +1. **Startup**: Feast server loads `static_artifacts.py` during initialization +2. **Loading**: `load_artifacts(app)` function stores models in `app.state` +3. **Access**: On-demand feature views access pre-loaded artifacts via global references + +```python +# static_artifacts.py - Define what to load +def load_artifacts(app: FastAPI): + app.state.sentiment_model = load_sentiment_model() + app.state.lookup_tables = load_lookup_tables() + + # Update global references for easy access + import example_repo + example_repo._sentiment_model = app.state.sentiment_model + example_repo._lookup_tables = app.state.lookup_tables + +# example_repo.py - Use pre-loaded artifacts +_sentiment_model = None # Set by static_artifacts.py + +def sentiment_prediction(inputs): + global _sentiment_model + if _sentiment_model is not None: + return _sentiment_model(inputs["text"]) + else: + return fallback_predictions() +``` + +### Scope and Limitations + +**āœ… Great for:** +- Small to medium models (< 1GB) +- Fast-loading models (sentiment analysis, classification) +- Lookup tables and reference data +- Configuration parameters +- Pre-computed embeddings + +**āŒ Not recommended for:** +- **Large Language Models (LLMs)** - Use dedicated serving solutions like vLLM, TGI, or TensorRT-LLM +- Models requiring GPU clusters +- Frequently updated models +- Models with complex initialization dependencies + +**Note:** Feast is optimized for feature serving, not large model inference. For production LLM workloads, use specialized model serving platforms. + +### Customizing Static Artifacts + +To add your own artifacts, modify `static_artifacts.py`: + +```python +def load_custom_embeddings(): + """Load pre-computed user embeddings.""" + embeddings_file = Path(__file__).parent / "data" / "user_embeddings.npy" + if embeddings_file.exists(): + import numpy as np + return {"embeddings": np.load(embeddings_file)} + return None + +def load_artifacts(app: FastAPI): + # Load your custom artifacts + app.state.custom_embeddings = load_custom_embeddings() + app.state.config_params = {"threshold": 0.7, "top_k": 10} + + # Make them available to feature views + import example_repo + example_repo._custom_embeddings = app.state.custom_embeddings +``` + +## šŸ“š Detailed Usage + +### 1. Feature Store Setup + +```python +from feast import FeatureStore + +store = FeatureStore(repo_path=".") +``` + +### 2. 
Training Data Retrieval + +```python +# Get historical features for model training +from datetime import datetime +import pandas as pd + +entity_df = pd.DataFrame({ + "text_id": ["text_0000", "text_0001", "text_0002"], + "user_id": ["user_080", "user_091", "user_052"], # Use actual generated user IDs + "event_timestamp": [datetime.now(), datetime.now(), datetime.now()] # Current timestamps +}) + +training_df = store.get_historical_features( + entity_df=entity_df, + features=[ + "text_features:text_content", + "text_features:sentiment_label", + "text_features:text_length", + "user_stats:user_avg_sentiment", + ], +).to_df() + +print(f"Retrieved {len(training_df)} training samples") +print(training_df.head()) +``` + +### 3. Real-time Inference + +```python +# Get features for online serving (use actual entity combinations) +entity_rows = [ + {"text_id": "text_0000", "user_id": "user_080"}, + {"text_id": "text_0001", "user_id": "user_091"} +] + +online_features = store.get_online_features( + features=store.get_feature_service("sentiment_analysis_v1"), + entity_rows=entity_rows, +).to_dict() + +print("Online features:", online_features) +``` + +### 4. On-Demand Sentiment Prediction + +```python +# Real-time sentiment analysis +prediction_rows = [{ + "input_text": "I love this product!", + "model_name": "cardiffnlp/twitter-roberta-base-sentiment-latest" +}] + +predictions = store.get_online_features( + features=[ + "sentiment_prediction:predicted_sentiment", + "sentiment_prediction:sentiment_confidence", + ], + entity_rows=prediction_rows, +).to_dict() +``` + +## šŸš€ Complete End-to-End Demo + +Here's a step-by-step walkthrough of the entire template workflow: + +### 1. Initialize and Setup + +```bash +# Create new project +feast init my-sentiment-demo -t pytorch_nlp +cd my-sentiment-demo + +# Install dependencies +pip install torch>=2.0.0 transformers>=4.30.0 + +# Navigate to feature repository +cd feature_repo +``` + +### 2. Apply Feature Store Configuration + +```bash +# Register entities, feature views, and services +feast apply +``` + +**Expected Output:** +``` +Created entity text +Created entity user +Created feature view text_features +Created feature view user_stats +Created on demand feature view sentiment_prediction +Created feature service sentiment_analysis_v1 +Created feature service sentiment_analysis_v2 +``` + +### 3. Materialize Features + +```bash +# Load features into online store +feast materialize-incremental $(date -u +"%Y-%m-%dT%H:%M:%S") +``` + +**Expected Output:** +``` +Materializing 2 feature views to 2025-XX-XX XX:XX:XX+00:00 into the sqlite online store. +text_features: ā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆ +user_stats: ā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆā–ˆ +``` + +### 4. Start Feature Server + +```bash +# Start HTTP feature server +feast serve --host 0.0.0.0 --port 6566 +``` + +**Expected Output:** +``` +Starting gunicorn 23.0.0 +Listening at: http://0.0.0.0:6566 +``` + +### 5. 
Query Features + +In a new terminal, test the feature server: + +```bash +# Check actual entity IDs in your data +python -c " +import pandas as pd +df = pd.read_parquet('data/sentiment_data.parquet') +print('Sample entities:', df.head()) +" + +# Test with actual entity combinations +curl -X POST \ + "http://localhost:6566/get-online-features" \ + -H "Content-Type: application/json" \ + -d '{ + "features": ["text_features:text_content", "text_features:sentiment_label"], + "entities": { + "text_id": ["text_0000"], + "user_id": ["user_XXX"] + } + }' | jq +``` + +## šŸŽ® Customization Examples + +### Adding New Features + +```python +# In example_repo.py, add to text_features_fv schema: +Field(name="hashtag_count", dtype=Int64, description="Number of hashtags"), +Field(name="mention_count", dtype=Int64, description="Number of @mentions"), +Field(name="url_count", dtype=Int64, description="Number of URLs"), +``` + +### Using Different Models + +```python +# In the sentiment_prediction function, change model: +model_name = "nlptown/bert-base-multilingual-uncased-sentiment" +# or +model_name = "distilbert-base-uncased-finetuned-sst-2-english" +``` + +### Adding Custom Transformations + +```python +@on_demand_feature_view( + sources=[text_input_request], + schema=[Field(name="toxicity_score", dtype=Float32)], +) +def toxicity_detection(inputs: pd.DataFrame) -> pd.DataFrame: + # Implement toxicity detection logic + pass +``` + +## šŸ“ˆ Production Considerations + +### Scaling to Production + +1. **Cloud Deployment**: Use AWS, GCP, or Azure providers instead of local +2. **Vector Store**: Replace SQLite with Milvus for similarity search +3. **Model Serving**: Deploy models with KServe or other serving framework +4. **Monitoring**: Add feature drift detection and model performance tracking + +### Performance Optimization + +**Current Architecture:** +- āœ… **Static artifacts loading** at server startup (see `static_artifacts.py`) +- āœ… **Pre-loaded models** cached in memory for fast inference +- CPU-only operation to avoid multiprocessing issues +- SQLite-based storage for fast local access + +**Implemented Optimizations:** +- **Startup-time Model Loading**: āœ… Models load once at server startup via `static_artifacts.py` +- **Memory-efficient Caching**: āœ… Models stored in `app.state` and accessed via global references +- **Fallback Handling**: āœ… Graceful degradation when artifacts fail to load + +**Additional Production Optimizations:** +1. **Batch Inference**: Process multiple texts together for efficiency +2. **Feature Materialization**: Pre-compute expensive features offline +3. **Async Processing**: Use async patterns for real-time serving +4. **Model Serving Layer**: Use dedicated model servers (TorchServe, vLLM) for large models + +### Production Configuration Examples + +**Note**: The demo uses SQLite (above). 
These are examples for production deployment:
+
+```yaml
+# feature_store.yaml for AWS production (requires Redis setup)
+project: sentiment_analysis_prod
+provider: aws
+registry: s3://my-bucket/feast/registry.pb
+online_store:
+  type: redis  # Requires separate Redis server
+  connection_string: redis://my-redis-cluster:6379
+offline_store:
+  type: redshift  # Example AWS offline store; swap in your own warehouse
+  region: us-west-2
+  cluster_id: my-redshift-cluster
+  database: feast
+  user: admin
+  s3_staging_location: s3://my-bucket/feast/redshift-staging
+  iam_role: arn:aws:iam::123456789012:role/feast-redshift-s3-access
+
+# feature_store.yaml for GCP production (requires cloud services)
+project: sentiment_analysis_prod
+provider: gcp
+registry: gs://my-bucket/feast/registry.pb
+online_store:
+  type: redis  # Requires separate Redis server
+  connection_string: redis://my-redis-cluster:6379
+offline_store:
+  type: bigquery
+  project_id: my-gcp-project
+```
+
+## šŸ¤ Contributing
+
+This template is designed to be extended and customized:
+
+1. **Add new feature transformations** in `example_repo.py`
+2. **Experiment with different models** in the `sentiment_prediction` function
+3. **Extend the test workflow** with additional evaluation metrics
+4. **Add new data sources** (Twitter API, product reviews, etc.)
+
+## šŸ“– Resources
+
+- [Feast Documentation](https://docs.feast.dev/)
+- [Hugging Face Transformers](https://huggingface.co/docs/transformers/)
+- [PyTorch Documentation](https://pytorch.org/docs/)
+
+## šŸ› Troubleshooting
+
+### Common Issues
+
+**ImportError: No module named 'transformers'**
+```bash
+pip install torch transformers
+```
+
+**Model download timeout**
+```bash
+# Set environment variable for Hugging Face cache
+export HF_HOME=/path/to/cache
+```
+
+**Feature store initialization fails**
+```bash
+# Reset the feature store
+feast teardown
+feast apply
+```
+
+**On-demand features return defaults**
+- This is expected if PyTorch/transformers aren't installed
+- The template includes fallback dummy predictions for demonstration
+
+### Getting Help
+
+- Check the [Feast GitHub Issues](https://github.com/feast-dev/feast/issues)
+- Join the [Feast Slack Community](https://slack.feast.dev/)
+- Review the [PyTorch Forums](https://discuss.pytorch.org/)
+
+---
+
+**Happy Feature Engineering! šŸŽ‰**
+
+Built with ā¤ļø using Feast, PyTorch, and Hugging Face.
diff --git a/sdk/python/feast/templates/pytorch_nlp/__init__.py b/sdk/python/feast/templates/pytorch_nlp/__init__.py new file mode 100644 index 00000000000..de76b0a8f66 --- /dev/null +++ b/sdk/python/feast/templates/pytorch_nlp/__init__.py @@ -0,0 +1 @@ +# Empty file to make this a Python package diff --git a/sdk/python/feast/templates/pytorch_nlp/bootstrap.py b/sdk/python/feast/templates/pytorch_nlp/bootstrap.py new file mode 100644 index 00000000000..6aad854747e --- /dev/null +++ b/sdk/python/feast/templates/pytorch_nlp/bootstrap.py @@ -0,0 +1,305 @@ +import pathlib +import random +from datetime import datetime, timedelta + +import pandas as pd +import pyarrow as pa +import pyarrow.parquet as pq + +from feast.file_utils import replace_str_in_file + + +def create_sentiment_data(num_samples: int = 1000) -> pd.DataFrame: + """Generate sentiment analysis dataset using BERTweet classifier predictions.""" + + # Diverse realistic text samples from various domains + sample_texts = [ + # Social media / tweets style + "Having an amazing day at the beach with friends!", + "Traffic is horrible today, going to be late for everything", + "Just finished my morning coffee, time to start work", + "This weather is perfect for a weekend getaway", + "Frustrated with this constant construction noise", + "The sunset tonight is absolutely breathtaking", + "Finally got tickets to the concert I wanted!", + "My phone battery died right when I needed it most", + "Loving the new album that just dropped today", + "Can't believe how long this line is taking", + # Product reviews / opinions + "This phone has incredible battery life and camera quality", + "The delivery was late and packaging was damaged", + "Pretty standard laptop, does what it's supposed to do", + "Amazing customer service, resolved my issue quickly", + "The quality is terrible for the price, disappointed", + "Works fine, good value for money, as described", + "Best purchase I've made this year, highly recommend", + "Returned this item, didn't work as advertised", + "Decent product but could be better for the cost", + "Exceeded my expectations, will buy again", + # General experiences + "Learning something new always makes me happy", + "Dealing with technical issues is draining my energy", + "The meeting went okay, covered the basic topics", + "Excited about the weekend plans with family", + "Another day of debugging code, the struggle continues", + "Really enjoying this book I started reading", + "The restaurant service was disappointing tonight", + "Nothing special planned, just a quiet evening", + "Great presentation today, audience was engaged", + "Feeling overwhelmed with all these deadlines", + # News / current events style + "The new policy changes will benefit small businesses", + "This decision could have negative environmental impact", + "The research findings are interesting but inconclusive", + "Economic indicators suggest stable growth ahead", + "Mixed reactions to the announcement yesterday", + "The data shows promising results across demographics", + "Public opinion remains divided on this issue", + "Significant improvements in the healthcare system", + "Concerns raised about the new regulations", + "Standard quarterly results meeting projections", + ] + + # Try to use BERTweet sentiment classifier, fallback to rule-based if not available + try: + from transformers import pipeline + + print(" šŸ¤– Loading BERTweet sentiment classifier...") + + # Use BERTweet model specifically trained for Twitter sentiment + sentiment_classifier = 
pipeline( + "sentiment-analysis", + model="finiteautomata/bertweet-base-sentiment-analysis", + return_all_scores=True, + ) + use_real_classifier = True + print(" āœ… BERTweet sentiment classifier loaded successfully") + + except ImportError: + print(" āš ļø Transformers not available, using rule-based sentiment") + print(" šŸ’” For real classifier: pip install transformers torch") + use_real_classifier = False + except Exception as e: + print(f" āš ļø Could not load BERTweet ({e}), using rule-based sentiment") + use_real_classifier = False + + # Generate data + data = [] + # Use current time and generate data within the last 30 days + now = datetime.now() + start_date = now - timedelta(days=30) + + # Extend sample texts by cycling through them to reach num_samples + all_texts = (sample_texts * (num_samples // len(sample_texts) + 1))[:num_samples] + + for i, base_text in enumerate(all_texts): + # Add some realistic variations to make texts more diverse + text = base_text + + # Occasionally add emphasis or emoji + if random.random() < 0.15: + text = text + "!" + elif random.random() < 0.1: + text = text + "..." + elif random.random() < 0.08: + if any( + word in text.lower() + for word in ["amazing", "love", "great", "best", "happy", "excited"] + ): + text = text + " 😊" + elif random.random() < 0.08: + if any( + word in text.lower() + for word in ["terrible", "disappointed", "frustrated", "horrible"] + ): + text = text + " šŸ˜ž" + + # Get sentiment from real classifier or fallback + if use_real_classifier: + try: + predictions = sentiment_classifier(text)[0] + + # Find highest confidence prediction + best_pred = max(predictions, key=lambda x: x["score"]) + sentiment_label = best_pred[ + "label" + ].upper() # BERTweet returns 'POS', 'NEG', 'NEU' + sentiment_score = best_pred["score"] + + # Map BERTweet labels to our format + label_map = {"POS": "positive", "NEG": "negative", "NEU": "neutral"} + sentiment_label = label_map.get( + sentiment_label, sentiment_label.lower() + ) + + except Exception as e: + print(f" āš ļø Classifier error for text {i}: {e}") + # Fallback to simple rule-based + sentiment_label, sentiment_score = _rule_based_sentiment(text) + else: + # Rule-based fallback + sentiment_label, sentiment_score = _rule_based_sentiment(text) + + # Generate engineered features + text_length = len(text) + word_count = len(text.split()) + exclamation_count = text.count("!") + caps_ratio = sum(1 for c in text if c.isupper()) / len(text) if text else 0 + emoji_count = sum(1 for c in text if ord(c) > 127) # Simple emoji detection + + # Random timestamp within the past 30 days + days_offset = random.randint(0, 30) + hours_offset = random.randint(0, 23) + minutes_offset = random.randint(0, 59) + event_timestamp = start_date + timedelta( + days=days_offset, hours=hours_offset, minutes=minutes_offset + ) + + data.append( + { + "text_id": f"text_{i:04d}", + "user_id": f"user_{random.randint(1, 100):03d}", + "text_content": text, + "sentiment_label": sentiment_label, + "sentiment_score": round(sentiment_score, 3), + "text_length": text_length, + "word_count": word_count, + "exclamation_count": exclamation_count, + "caps_ratio": round(caps_ratio, 3), + "emoji_count": emoji_count, + "event_timestamp": pd.Timestamp(event_timestamp, tz="UTC"), + "created": pd.Timestamp.now(tz="UTC").round("ms"), + } + ) + + df = pd.DataFrame(data) + + # Calculate user-level aggregations + user_stats = ( + df.groupby("user_id") + .agg({"sentiment_score": "mean", "text_id": "count", "text_length": "mean"}) + .rename( + 
columns={ + "sentiment_score": "user_avg_sentiment", + "text_id": "user_text_count", + "text_length": "user_avg_text_length", + } + ) + .round(3) + .reset_index() + ) + + # Merge user stats back to main dataframe + df = df.merge(user_stats, on="user_id", how="left") + + return df + + +def _rule_based_sentiment(text: str) -> tuple[str, float]: + """Fallback rule-based sentiment analysis when BERTweet is not available.""" + text_lower = text.lower() + + positive_words = [ + "amazing", + "love", + "great", + "excellent", + "wonderful", + "perfect", + "outstanding", + "fantastic", + "best", + "happy", + "good", + "awesome", + "incredible", + "beautiful", + "excited", + "enjoying", + ] + negative_words = [ + "terrible", + "horrible", + "awful", + "worst", + "bad", + "disappointed", + "frustrated", + "angry", + "sad", + "broken", + "failed", + "poor", + "draining", + "overwhelming", + "disappointing", + ] + + positive_count = sum(1 for word in positive_words if word in text_lower) + negative_count = sum(1 for word in negative_words if word in text_lower) + + if positive_count > negative_count: + return "positive", random.uniform(0.6, 0.9) + elif negative_count > positive_count: + return "negative", random.uniform(0.6, 0.9) + else: + return "neutral", random.uniform(0.5, 0.7) + + +def bootstrap(): + """Bootstrap the pytorch_nlp template with sample data.""" + repo_path = pathlib.Path(__file__).parent.absolute() / "feature_repo" + raw_project_name = pathlib.Path(__file__).parent.absolute().name + + # Sanitize project name for SQLite compatibility (no hyphens allowed) + project_name = raw_project_name.replace("-", "_") + if project_name != raw_project_name: + print(f" ā„¹ļø Project name sanitized: '{raw_project_name}' → '{project_name}'") + print(" šŸ’” SQLite table names cannot contain hyphens") + + data_path = repo_path / "data" + data_path.mkdir(exist_ok=True) + + print("šŸŽ­ Setting up sentiment analysis data for PyTorch NLP demonstration...") + + parquet_file = data_path / "sentiment_data.parquet" + + # Generate sentiment data + print(" šŸ“ Generating synthetic sentiment analysis dataset...") + df = create_sentiment_data(num_samples=1000) + + # Save to parquet + table = pa.Table.from_pandas(df) + pq.write_table(table, parquet_file) + + print(f" āœ… Created sentiment dataset with {len(df)} samples") + print(" šŸ“Š Sentiment distribution:") + sentiment_counts = df["sentiment_label"].value_counts() + for sentiment, count in sentiment_counts.items(): + print(f" - {sentiment.capitalize()}: {count} samples") + + # Replace template placeholders + example_py_file = repo_path / "example_repo.py" + replace_str_in_file(example_py_file, "%PROJECT_NAME%", str(project_name)) + + test_workflow_file = repo_path / "test_workflow.py" + replace_str_in_file(test_workflow_file, "%PROJECT_NAME%", str(project_name)) + + print("šŸš€ PyTorch NLP template initialized successfully!") + + print("\nšŸŽÆ To get started:") + print(f" 1. cd {project_name}") + print(" 2. pip install -r requirements.txt") + print(" 3. cd feature_repo") + print(" 4. feast apply") + print(" 5. feast materialize") + print(" 6. 
python test_workflow.py") + print("\nšŸ’” This template demonstrates:") + print(" - Text feature engineering with Feast") + print(" - PyTorch + Hugging Face transformers integration") + print(" - Sentiment analysis with pre-trained models") + print(" - Online and offline feature serving") + + +if __name__ == "__main__": + bootstrap() diff --git a/sdk/python/feast/templates/pytorch_nlp/feature_repo/__init__.py b/sdk/python/feast/templates/pytorch_nlp/feature_repo/__init__.py new file mode 100644 index 00000000000..5d81b19ce14 --- /dev/null +++ b/sdk/python/feast/templates/pytorch_nlp/feature_repo/__init__.py @@ -0,0 +1 @@ +# This file is auto-generated by the Feast project template diff --git a/sdk/python/feast/templates/pytorch_nlp/feature_repo/example_repo.py b/sdk/python/feast/templates/pytorch_nlp/feature_repo/example_repo.py new file mode 100644 index 00000000000..ee49bea2899 --- /dev/null +++ b/sdk/python/feast/templates/pytorch_nlp/feature_repo/example_repo.py @@ -0,0 +1,275 @@ +""" +PyTorch NLP Sentiment Analysis Feature Repository + +This template demonstrates sentiment analysis using: +- Text feature engineering for NLP +- PyTorch + Hugging Face transformers integration +- On-demand sentiment prediction features +- Online and offline feature serving patterns +""" + +from datetime import timedelta +from pathlib import Path + +import pandas as pd + +from feast import ( + Entity, + FeatureService, + FeatureView, + Field, + FileSource, + RequestSource, + ValueType, +) +from feast.on_demand_feature_view import on_demand_feature_view +from feast.types import Array, Float32, Int64, String + +try: + # Import static artifacts helpers (available when feature server loads artifacts) + from static_artifacts import get_lookup_tables, get_sentiment_model +except ImportError: + # Fallback for when static_artifacts.py is not available + get_sentiment_model = None + get_lookup_tables = None + +# Global references for static artifacts (set by feature server) +_sentiment_model = None +_lookup_tables: dict = {} + +# Configuration +repo_path = Path(__file__).parent +data_path = repo_path / "data" + +# Define entities - primary keys for joining data +text_entity = Entity( + name="text", + join_keys=["text_id"], + value_type=ValueType.STRING, + description="Unique identifier for text samples", +) + +user_entity = Entity( + name="user", + join_keys=["user_id"], + value_type=ValueType.STRING, + description="User who created the text content", +) + +# Data source - points to the parquet file created by bootstrap +sentiment_source = FileSource( + name="sentiment_data_source", + path=str(data_path / "sentiment_data.parquet"), + timestamp_field="event_timestamp", + created_timestamp_column="created", +) + +# Feature view for text metadata and engineered features +text_features_fv = FeatureView( + name="text_features", + entities=[text_entity], + ttl=timedelta(days=7), # Keep features for 7 days + schema=[ + Field(name="text_content", dtype=String, description="Raw text content"), + Field( + name="sentiment_label", + dtype=String, + description="Ground truth sentiment label", + ), + Field( + name="sentiment_score", + dtype=Float32, + description="Ground truth sentiment score", + ), + Field(name="text_length", dtype=Int64, description="Character count of text"), + Field(name="word_count", dtype=Int64, description="Word count of text"), + Field( + name="exclamation_count", + dtype=Int64, + description="Number of exclamation marks", + ), + Field(name="caps_ratio", dtype=Float32, description="Ratio of capital 
letters"), + Field( + name="emoji_count", dtype=Int64, description="Number of emoji characters" + ), + ], + online=True, + source=sentiment_source, + tags={"team": "nlp", "domain": "sentiment_analysis"}, +) + +# Feature view for user-level aggregations +user_stats_fv = FeatureView( + name="user_stats", + entities=[user_entity], + ttl=timedelta(days=30), # User stats change less frequently + schema=[ + Field( + name="user_avg_sentiment", + dtype=Float32, + description="User's average sentiment score", + ), + Field( + name="user_text_count", + dtype=Int64, + description="Total number of texts by user", + ), + Field( + name="user_avg_text_length", + dtype=Float32, + description="User's average text length", + ), + ], + online=True, + source=sentiment_source, + tags={"team": "nlp", "domain": "user_behavior"}, +) + +# Request source for real-time inference +text_input_request = RequestSource( + name="text_input", + schema=[ + Field( + name="input_text", + dtype=String, + description="Text to analyze at request time", + ), + Field( + name="model_name", dtype=String, description="Model to use for prediction" + ), + ], +) + + +# On-demand feature view for real-time sentiment prediction +@on_demand_feature_view( + sources=[text_input_request], + schema=[ + Field(name="predicted_sentiment", dtype=String), + Field(name="sentiment_confidence", dtype=Float32), + Field(name="positive_prob", dtype=Float32), + Field(name="negative_prob", dtype=Float32), + Field(name="neutral_prob", dtype=Float32), + Field(name="text_embedding", dtype=Array(Float32)), + ], +) +def sentiment_prediction(inputs: pd.DataFrame) -> pd.DataFrame: + """ + Real-time sentiment prediction using pre-loaded static artifacts. + + This function demonstrates how to use static artifacts (pre-loaded models, + lookup tables) for efficient real-time inference. Models are loaded once + at feature server startup rather than on each request. 
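+
+    Request columns (from the text_input RequestSource): input_text and model_name.
+    Returned columns mirror this view's schema: predicted_sentiment,
+    sentiment_confidence, positive_prob, negative_prob, neutral_prob, and
+    text_embedding.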
+ """ + try: + import numpy as np + except ImportError: + # Fallback to dummy predictions if numpy isn't available + + df = pd.DataFrame() + df["predicted_sentiment"] = ["neutral"] * len(inputs) + df["sentiment_confidence"] = [0.5] * len(inputs) + df["positive_prob"] = [0.33] * len(inputs) + df["negative_prob"] = [0.33] * len(inputs) + df["neutral_prob"] = [0.34] * len(inputs) + df["text_embedding"] = [[0.0] * 384] * len(inputs) + return df + + # Get pre-loaded static artifacts from global references + # These are loaded once at startup via static_artifacts.py + global _sentiment_model, _lookup_tables + + sentiment_model = _sentiment_model + lookup_tables = _lookup_tables + + # Use lookup table for label mapping (from static artifacts) + label_map = lookup_tables.get( + "sentiment_labels", + {"LABEL_0": "negative", "LABEL_1": "neutral", "LABEL_2": "positive"}, + ) + + results = [] + + for text in inputs["input_text"]: + try: + if sentiment_model is not None: + # Use pre-loaded model for prediction + predictions = sentiment_model(text) + + # Parse results using static lookup tables + scores = { + label_map.get(pred["label"], pred["label"]): pred["score"] + for pred in predictions + } + + # Get best prediction + best_pred = max(predictions, key=lambda x: x["score"]) + predicted_sentiment = label_map.get( + best_pred["label"], best_pred["label"] + ) + confidence = best_pred["score"] + else: + # Fallback when model is not available + predicted_sentiment = "neutral" + confidence = 0.5 + scores = {"positive": 0.33, "negative": 0.33, "neutral": 0.34} + + # Generate dummy embeddings (in production, use pre-loaded embeddings) + embedding = np.random.rand(384).tolist() + + results.append( + { + "predicted_sentiment": predicted_sentiment, + "sentiment_confidence": np.float32(confidence), + "positive_prob": np.float32(scores.get("positive", 0.0)), + "negative_prob": np.float32(scores.get("negative", 0.0)), + "neutral_prob": np.float32(scores.get("neutral", 0.0)), + "text_embedding": [np.float32(x) for x in embedding], + } + ) + + except Exception: + # Fallback for individual text processing errors + results.append( + { + "predicted_sentiment": "neutral", + "sentiment_confidence": np.float32(0.5), + "positive_prob": np.float32(0.33), + "negative_prob": np.float32(0.33), + "neutral_prob": np.float32(0.34), + "text_embedding": [np.float32(0.0)] * 384, + } + ) + + return pd.DataFrame(results) + + +# Feature services group related features for model serving +sentiment_analysis_v1 = FeatureService( + name="sentiment_analysis_v1", + features=[ + text_features_fv[["text_content", "text_length", "word_count"]], + sentiment_prediction, + ], + description="Basic sentiment analysis features for model v1", +) + +sentiment_analysis_v2 = FeatureService( + name="sentiment_analysis_v2", + features=[ + text_features_fv, # All text features + user_stats_fv[["user_avg_sentiment", "user_text_count"]], # User context + sentiment_prediction, # Real-time predictions + ], + description="Advanced sentiment analysis with user context for model v2", +) + +# Feature service for training data (historical features only) +sentiment_training_features = FeatureService( + name="sentiment_training_features", + features=[ + text_features_fv, + user_stats_fv, + ], + description="Historical features for model training and evaluation", +) diff --git a/sdk/python/feast/templates/pytorch_nlp/feature_repo/feature_store.yaml b/sdk/python/feast/templates/pytorch_nlp/feature_repo/feature_store.yaml new file mode 100644 index 
00000000000..e7c306623c0 --- /dev/null +++ b/sdk/python/feast/templates/pytorch_nlp/feature_repo/feature_store.yaml @@ -0,0 +1,9 @@ +project: my_project +provider: local +registry: data/registry.db +online_store: + type: sqlite + path: data/online_store.db +offline_store: + type: file +entity_key_serialization_version: 3 \ No newline at end of file diff --git a/sdk/python/feast/templates/pytorch_nlp/feature_repo/static_artifacts.py b/sdk/python/feast/templates/pytorch_nlp/feature_repo/static_artifacts.py new file mode 100644 index 00000000000..6f7a5ae3091 --- /dev/null +++ b/sdk/python/feast/templates/pytorch_nlp/feature_repo/static_artifacts.py @@ -0,0 +1,139 @@ +""" +Static Artifacts Loading for PyTorch NLP Template + +This module demonstrates how to load static artifacts (models, lookup tables, etc.) +into the Feast feature server at startup for efficient real-time inference. + +Supported artifact types: +- Small ML models (transformers, scikit-learn, etc.) +- Lookup tables and reference data +- Configuration parameters +- Pre-computed embeddings + +Note: Feast is not optimized for large language models. For LLM inference, +use dedicated model serving solutions like vLLM, TensorRT-LLM, or TGI. +""" + +from pathlib import Path +from typing import Any, Dict, Optional + +from fastapi import FastAPI +from fastapi.logger import logger + + +def load_sentiment_model(): + """Load sentiment analysis model for real-time inference.""" + try: + from transformers import pipeline + + logger.info("Loading sentiment analysis model...") + model = pipeline( + "sentiment-analysis", + model="cardiffnlp/twitter-roberta-base-sentiment-latest", + tokenizer="cardiffnlp/twitter-roberta-base-sentiment-latest", + return_all_scores=True, + device="cpu", # Force CPU to avoid MPS forking issues on macOS + ) + logger.info("āœ… Sentiment analysis model loaded successfully") + return model + except ImportError: + logger.warning( + "āš ļø Transformers not available, sentiment model will use fallback" + ) + return None + except Exception as e: + logger.warning(f"āš ļø Failed to load sentiment model: {e}") + return None + + +def load_lookup_tables() -> Dict[str, Any]: + """Load static lookup tables for feature engineering.""" + # Example: Load static mappings that are expensive to compute at request time + return { + "sentiment_labels": { + "LABEL_0": "negative", + "LABEL_1": "neutral", + "LABEL_2": "positive", + }, + "emoji_sentiment": {"😊": "positive", "šŸ˜ž": "negative", "😐": "neutral"}, + "domain_categories": {"twitter.com": "social", "news.com": "news"}, + } + + +def load_user_embeddings() -> Optional[Dict[str, Any]]: + """Load pre-computed user embeddings if available.""" + # Example: Load static user embeddings for recommendation features + embeddings_file = Path(__file__).parent / "data" / "user_embeddings.npy" + + if embeddings_file.exists(): + try: + import numpy as np + + embeddings = np.load(embeddings_file) + logger.info(f"āœ… Loaded user embeddings: {embeddings.shape}") + return {"embeddings": embeddings} + except Exception as e: + logger.warning(f"āš ļø Failed to load user embeddings: {e}") + + return None + + +def load_artifacts(app: FastAPI): + """ + Main function called by Feast feature server to load static artifacts. + + This function is called during server startup and should store artifacts + in app.state for access by on-demand feature views. 
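+
+    Illustrative access pattern from server-side code, using the helper
+    functions defined below in this module:
+
+        model = get_sentiment_model(app.state)
+        labels = get_lookup_tables(app.state).get("sentiment_labels", {})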
+ """ + logger.info("šŸ”„ Loading static artifacts for PyTorch NLP template...") + + # Load sentiment analysis model + app.state.sentiment_model = load_sentiment_model() + + # Load lookup tables + app.state.lookup_tables = load_lookup_tables() + + # Load user embeddings (optional) + app.state.user_embeddings = load_user_embeddings() + + # Also set global references for easier access from on-demand feature views + try: + import example_repo + + example_repo._sentiment_model = app.state.sentiment_model + example_repo._lookup_tables = app.state.lookup_tables + logger.info("āœ… Global artifact references updated") + except ImportError: + logger.warning("āš ļø Could not update global artifact references") + + logger.info("āœ… Static artifacts loading completed") + + +def get_static_artifact(app_state: Any, name: str) -> Any: + """ + Helper function to safely access static artifacts from app.state. + + Args: + app_state: FastAPI app.state object + name: Name of the artifact to retrieve + + Returns: + The requested artifact or None if not found + """ + return getattr(app_state, name, None) + + +# Convenience functions for accessing specific artifacts +def get_sentiment_model(app_state: Any): + """Get the pre-loaded sentiment analysis model.""" + return get_static_artifact(app_state, "sentiment_model") + + +def get_lookup_tables(app_state: Any) -> Dict[str, Any]: + """Get the pre-loaded lookup tables.""" + return get_static_artifact(app_state, "lookup_tables") or {} + + +def get_user_embeddings(app_state: Any): + """Get the pre-loaded user embeddings.""" + return get_static_artifact(app_state, "user_embeddings") diff --git a/sdk/python/feast/templates/pytorch_nlp/feature_repo/test_workflow.py b/sdk/python/feast/templates/pytorch_nlp/feature_repo/test_workflow.py new file mode 100644 index 00000000000..efca72fc3bc --- /dev/null +++ b/sdk/python/feast/templates/pytorch_nlp/feature_repo/test_workflow.py @@ -0,0 +1,350 @@ +""" +PyTorch NLP Sentiment Analysis - Complete Test Workflow + +This script demonstrates the full lifecycle of a sentiment analysis project using Feast: +1. Feature store setup and deployment +2. Historical feature retrieval for model training +3. Online feature serving for real-time inference +4. Integration with PyTorch and Hugging Face models +5. Performance evaluation and monitoring +""" + +import subprocess +from datetime import datetime, timedelta + +import pandas as pd + +from feast import FeatureStore + + +def run_demo(): + """Run the complete PyTorch NLP sentiment analysis demo.""" + print("šŸŽ­ PyTorch NLP Sentiment Analysis Demo") + print("=====================================") + + store = FeatureStore(repo_path=".") + + # 1. Deploy feature definitions + print("\nšŸš€ Step 1: Deploy feature definitions") + print("--- Run feast apply ---") + subprocess.run(["feast", "apply"]) + + # 2. Materialize features to online store + print("\nšŸ’¾ Step 2: Materialize features to online store") + print("--- Load features into online store ---") + store.materialize_incremental(end_date=datetime.now()) + + # 3. Demonstrate historical feature retrieval for training + print("\nšŸ“š Step 3: Historical features for model training") + training_data = fetch_historical_features_for_training(store) + + # 4. Simulate model training (conceptual) + print("\nšŸ‹ļø Step 4: Simulate model training") + simulate_model_training(training_data) + + # 5. Online feature serving for real-time inference + print("\n⚔ Step 5: Real-time inference with online features") + test_online_inference(store) + + # 6. 
Demonstrate on-demand feature views + print("\nšŸ”® Step 6: On-demand sentiment prediction") + test_on_demand_sentiment_prediction(store) + + # 7. Feature service usage + print("\nšŸŽÆ Step 7: Feature services for model versioning") + test_feature_services(store) + + # 8. Performance evaluation + print("\nšŸ“Š Step 8: Model performance evaluation") + evaluate_model_performance(store) + + print("\n✨ Demo completed successfully!") + print("\nšŸ“– Next steps:") + print(" - Modify the sentiment data in data/sentiment_data.parquet") + print(" - Experiment with different models in example_repo.py") + print(" - Add more feature engineering transformations") + print(" - Deploy to production with cloud providers (AWS, GCP, etc.)") + + +def fetch_historical_features_for_training(store: FeatureStore) -> pd.DataFrame: + """Fetch historical features for model training with point-in-time correctness.""" + # Create entity DataFrame for training + # In practice, this would come from your ML pipeline or data warehouse + entity_df = pd.DataFrame.from_dict( + { + "text_id": [ + "text_0001", + "text_0002", + "text_0003", + "text_0004", + "text_0005", + "text_0010", + "text_0015", + "text_0020", + "text_0025", + "text_0030", + ], + "user_id": [ + "user_001", + "user_002", + "user_001", + "user_003", + "user_002", + "user_001", + "user_004", + "user_003", + "user_005", + "user_001", + ], + "event_timestamp": [ + datetime(2023, 6, 15, 10, 0, 0), + datetime(2023, 6, 15, 11, 30, 0), + datetime(2023, 6, 15, 14, 15, 0), + datetime(2023, 6, 16, 9, 45, 0), + datetime(2023, 6, 16, 13, 20, 0), + datetime(2023, 6, 17, 8, 30, 0), + datetime(2023, 6, 17, 16, 45, 0), + datetime(2023, 6, 18, 12, 10, 0), + datetime(2023, 6, 18, 15, 30, 0), + datetime(2023, 6, 19, 11, 0, 0), + ], + } + ) + + # Fetch historical features using the training feature service + print(" šŸ“Š Retrieving training dataset with point-in-time correctness...") + training_df = store.get_historical_features( + entity_df=entity_df, + features=[ + "text_features:text_content", + "text_features:sentiment_label", + "text_features:sentiment_score", + "text_features:text_length", + "text_features:word_count", + "text_features:exclamation_count", + "text_features:caps_ratio", + "text_features:emoji_count", + "user_stats:user_avg_sentiment", + "user_stats:user_text_count", + ], + ).to_df() + + print(f" āœ… Retrieved {len(training_df)} training samples") + print(" šŸ“‹ Sample training data:") + print( + training_df[ + ["text_content", "sentiment_label", "text_length", "word_count"] + ].head(3) + ) + + return training_df + + +def simulate_model_training(training_data: pd.DataFrame): + """Simulate model training process (conceptual implementation).""" + print(" 🧠 Training sentiment analysis model...") + + # In a real implementation, you would: + # 1. Split data into train/validation/test + # 2. Tokenize text using transformers tokenizer + # 3. Fine-tune a pre-trained model (BERT, RoBERTa, etc.) + # 4. Evaluate performance metrics + # 5. 
Save the trained model + + print(f" šŸ“Š Training data shape: {training_data.shape}") + + if not training_data.empty: + # Simple statistics as a proxy for training + sentiment_dist = training_data["sentiment_label"].value_counts() + avg_text_length = training_data["text_length"].mean() + + print(" šŸ“ˆ Sentiment distribution:") + for sentiment, count in sentiment_dist.items(): + print( + f" {sentiment}: {count} samples ({count / len(training_data) * 100:.1f}%)" + ) + + print(f" šŸ“ Average text length: {avg_text_length:.1f} characters") + print(" āœ… Model training simulation completed!") + else: + print(" āš ļø No training data available") + + +def test_online_inference(store: FeatureStore): + """Test online feature serving for real-time inference.""" + print(" ⚔ Testing real-time feature serving...") + + # Entity rows for online inference + entity_rows = [ + {"text_id": "text_0001", "user_id": "user_001"}, + {"text_id": "text_0002", "user_id": "user_002"}, + {"text_id": "text_0005", "user_id": "user_002"}, + ] + + # Fetch online features + online_features = store.get_online_features( + features=[ + "text_features:text_content", + "text_features:text_length", + "text_features:word_count", + "user_stats:user_avg_sentiment", + ], + entity_rows=entity_rows, + ).to_dict() + + print(" šŸ“Š Retrieved online features:") + for key, values in online_features.items(): + if key in ["text_content"]: + # Truncate long text for display + display_values = [ + str(v)[:50] + "..." if len(str(v)) > 50 else str(v) for v in values + ] + print(f" {key}: {display_values}") + else: + print(f" {key}: {values}") + + +def test_on_demand_sentiment_prediction(store: FeatureStore): + """Test on-demand feature views for real-time sentiment prediction.""" + print(" šŸ”® Testing on-demand sentiment prediction...") + + # Request data for on-demand features + entity_rows = [ + { + "input_text": "I love this product! It's absolutely amazing and works perfectly!", + "model_name": "cardiffnlp/twitter-roberta-base-sentiment-latest", + }, + { + "input_text": "This is terrible quality. Completely disappointed with the purchase.", + "model_name": "cardiffnlp/twitter-roberta-base-sentiment-latest", + }, + { + "input_text": "The product is okay. Nothing special but it does work as expected.", + "model_name": "cardiffnlp/twitter-roberta-base-sentiment-latest", + }, + ] + + try: + # Get on-demand predictions + predictions = store.get_online_features( + features=[ + "sentiment_prediction:predicted_sentiment", + "sentiment_prediction:sentiment_confidence", + "sentiment_prediction:positive_prob", + "sentiment_prediction:negative_prob", + "sentiment_prediction:neutral_prob", + ], + entity_rows=entity_rows, + ).to_dict() + + print(" šŸŽÆ Prediction results:") + for i in range(len(entity_rows)): + text = entity_rows[i]["input_text"][:60] + "..." 
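+            # .to_dict() returns {feature_name: [values, ...]}; index i lines up with entity_rows[i]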
+ sentiment = predictions["predicted_sentiment"][i] + confidence = predictions["sentiment_confidence"][i] + print(f" Text: {text}") + print(f" Predicted: {sentiment} (confidence: {confidence:.3f})") + print( + f" Probabilities: P={predictions['positive_prob'][i]:.3f}, " + f"N={predictions['negative_prob'][i]:.3f}, " + f"Neu={predictions['neutral_prob'][i]:.3f}" + ) + print() + + except Exception as e: + print(f" āš ļø On-demand prediction failed: {e}") + print( + " šŸ’” This is expected if PyTorch/transformers dependencies are not installed" + ) + print(" šŸ“¦ Install with: pip install torch transformers") + + +def test_feature_services(store: FeatureStore): + """Test different feature services for model versioning.""" + print(" šŸŽÆ Testing feature services...") + + entity_rows = [{"text_id": "text_0001", "user_id": "user_001"}] + + # Test basic sentiment analysis service (v1) + print(" šŸ“¦ Testing sentiment_analysis_v1 feature service...") + try: + features_v1 = store.get_online_features( + features=store.get_feature_service("sentiment_analysis_v1"), + entity_rows=entity_rows, + ).to_dict() + print(f" āœ… Retrieved {len(features_v1)} feature types for v1") + except Exception as e: + print(f" āš ļø Feature service v1 failed: {e}") + + # Test advanced sentiment analysis service (v2) + print(" šŸ“¦ Testing sentiment_analysis_v2 feature service...") + try: + features_v2 = store.get_online_features( + features=store.get_feature_service("sentiment_analysis_v2"), + entity_rows=entity_rows, + ).to_dict() + print(f" āœ… Retrieved {len(features_v2)} feature types for v2") + except Exception as e: + print(f" āš ļø Feature service v2 failed: {e}") + + +def evaluate_model_performance(store: FeatureStore): + """Evaluate model performance using historical features.""" + print(" šŸ“Š Evaluating model performance...") + + try: + # Get a sample of historical data for evaluation + entity_df = pd.DataFrame( + { + "text_id": [f"text_{i:04d}" for i in range(1, 21)], + "user_id": [f"user_{(i % 5) + 1:03d}" for i in range(1, 21)], + "event_timestamp": [ + datetime.now() - timedelta(hours=i) for i in range(20) + ], + } + ) + + # Fetch features and labels + eval_df = store.get_historical_features( + entity_df=entity_df, + features=[ + "text_features:text_content", + "text_features:sentiment_label", + "text_features:sentiment_score", + ], + ).to_df() + + if not eval_df.empty and "sentiment_label" in eval_df.columns: + # Calculate basic performance metrics + sentiment_dist = eval_df["sentiment_label"].value_counts() + avg_score = ( + eval_df["sentiment_score"].mean() + if "sentiment_score" in eval_df.columns + else 0 + ) + + print(" šŸ“ˆ Performance summary:") + print(f" Evaluation samples: {len(eval_df)}") + print(f" Average sentiment score: {avg_score:.3f}") + print(" Class distribution:") + for sentiment, count in sentiment_dist.items(): + print( + f" {sentiment}: {count} ({count / len(eval_df) * 100:.1f}%)" + ) + + # In a real implementation, you would: + # 1. Compare predicted vs actual labels + # 2. Calculate accuracy, precision, recall, F1-score + # 3. Generate confusion matrix + # 4. Analyze error cases + # 5. 
Monitor model drift over time + + else: + print(" āš ļø No evaluation data available") + + except Exception as e: + print(f" āš ļø Evaluation failed: {e}") + + +if __name__ == "__main__": + run_demo() diff --git a/sdk/python/feast/templates/pytorch_nlp/gitignore b/sdk/python/feast/templates/pytorch_nlp/gitignore new file mode 100644 index 00000000000..88196cd1f22 --- /dev/null +++ b/sdk/python/feast/templates/pytorch_nlp/gitignore @@ -0,0 +1,180 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be added to the global gitignore or merged into this project gitignore. 
For a PyCharm +# project, it is recommended to include the following directory in .gitignore: +# https://intellij-support.jetbrains.com/hc/en/articles/206544839 + +# Feast-specific +feature_repo/data/*.db +feature_repo/data/*.db.lock +feature_repo/data/online_store.db* +feature_repo/data/registry.db* +feature_repo/data/registry.pb* + +# Model cache +.cache/ +huggingface/ +transformers_cache/ + +# MacOS +.DS_Store + +# IDE +.vscode/ +.idea/ +*.swp +*.swo + +# Logs +*.log \ No newline at end of file diff --git a/sdk/python/feast/templates/ray/README.md b/sdk/python/feast/templates/ray/README.md index dd2d8c9f7f1..4004b8e9a59 100644 --- a/sdk/python/feast/templates/ray/README.md +++ b/sdk/python/feast/templates/ray/README.md @@ -8,7 +8,7 @@ This template demonstrates Feast's Ray integration, showcasing both the **Ray Of ray_template/ ā”œā”€ā”€ feature_repo/ │ ā”œā”€ā”€ feature_store.yaml # Ray offline store + compute engine config -│ ā”œā”€ā”€ example_repo.py # Feature definitions with Ray optimizations +│ ā”œā”€ā”€ feature_definitions.py # Feature definitions with Ray optimizations │ ā”œā”€ā”€ test_workflow.py # Demo script showing Ray capabilities │ └── data/ # Sample datasets (generated by bootstrap) │ ā”œā”€ā”€ driver_stats.parquet diff --git a/sdk/python/feast/templates/ray/bootstrap.py b/sdk/python/feast/templates/ray/bootstrap.py index 30f5bf7dd00..6baf387d921 100644 --- a/sdk/python/feast/templates/ray/bootstrap.py +++ b/sdk/python/feast/templates/ray/bootstrap.py @@ -69,8 +69,8 @@ def bootstrap(): path=str(customer_profile_path), allow_truncated_timestamps=True ) - # Update the example_repo.py file with actual paths - example_py_file = repo_path / "example_repo.py" + # Update the feature_definitions.py file with actual paths + example_py_file = repo_path / "feature_definitions.py" replace_str_in_file(example_py_file, "%PROJECT_NAME%", str(project_name)) replace_str_in_file( example_py_file, "%PARQUET_PATH%", str(driver_stats_path.relative_to(repo_path)) diff --git a/sdk/python/feast/templates/ray/feature_repo/example_repo.py b/sdk/python/feast/templates/ray/feature_repo/feature_definitions.py similarity index 100% rename from sdk/python/feast/templates/ray/feature_repo/example_repo.py rename to sdk/python/feast/templates/ray/feature_repo/feature_definitions.py diff --git a/sdk/python/feast/templates/ray_rag/README.md b/sdk/python/feast/templates/ray_rag/README.md index e1289826c6a..240288b88d0 100644 --- a/sdk/python/feast/templates/ray_rag/README.md +++ b/sdk/python/feast/templates/ray_rag/README.md @@ -16,7 +16,7 @@ RAG (Retrieval-Augmented Generation) template using Feast with Ray for distribut ray_rag/ ā”œā”€ā”€ feature_repo/ │ ā”œā”€ā”€ feature_store.yaml # Ray + Milvus configuration -│ ā”œā”€ā”€ example_repo.py # Feature definitions with Ray UDF +│ ā”œā”€ā”€ feature_definitions.py # Feature definitions with Ray UDF │ ā”œā”€ā”€ test_workflow.py # End-to-end demo │ └── data/ │ └── raw_movies.parquet # Sample IMDB dataset (10 movies) diff --git a/sdk/python/feast/templates/ray_rag/bootstrap.py b/sdk/python/feast/templates/ray_rag/bootstrap.py index 752cee0a5c8..a903969eaef 100644 --- a/sdk/python/feast/templates/ray_rag/bootstrap.py +++ b/sdk/python/feast/templates/ray_rag/bootstrap.py @@ -74,7 +74,7 @@ def bootstrap(): pq.write_table(table, parquet_file) print(f" āœ… Created sample dataset with {len(sample_data)} movies") - example_py_file = repo_path / "example_repo.py" + example_py_file = repo_path / "feature_definitions.py" replace_str_in_file(example_py_file, 
"%PROJECT_NAME%", str(project_name)) print("šŸš€ Ray RAG template initialized successfully!") diff --git a/sdk/python/feast/templates/ray_rag/feature_repo/example_repo.py b/sdk/python/feast/templates/ray_rag/feature_repo/feature_definitions.py similarity index 100% rename from sdk/python/feast/templates/ray_rag/feature_repo/example_repo.py rename to sdk/python/feast/templates/ray_rag/feature_repo/feature_definitions.py diff --git a/sdk/python/feast/templates/spark/feature_repo/example_repo.py b/sdk/python/feast/templates/spark/feature_repo/feature_definitions.py similarity index 100% rename from sdk/python/feast/templates/spark/feature_repo/example_repo.py rename to sdk/python/feast/templates/spark/feature_repo/feature_definitions.py diff --git a/sdk/python/feast/ui/package.json b/sdk/python/feast/ui/package.json index 45cb24e2e54..f4975fe2c21 100644 --- a/sdk/python/feast/ui/package.json +++ b/sdk/python/feast/ui/package.json @@ -6,7 +6,7 @@ "@elastic/datemath": "^5.0.3", "@elastic/eui": "^72.0.0", "@emotion/react": "^11.9.0", - "@feast-dev/feast-ui": "0.56.0", + "@feast-dev/feast-ui": "0.57.0", "@testing-library/jest-dom": "^5.16.4", "@testing-library/react": "^13.2.0", "@testing-library/user-event": "^13.5.0", diff --git a/sdk/python/feast/ui/yarn.lock b/sdk/python/feast/ui/yarn.lock index 1f5bfdbda7b..7007b8c72ec 100644 --- a/sdk/python/feast/ui/yarn.lock +++ b/sdk/python/feast/ui/yarn.lock @@ -1575,10 +1575,10 @@ minimatch "^3.1.2" strip-json-comments "^3.1.1" -"@feast-dev/feast-ui@0.56.0": - version "0.56.0" - resolved "https://registry.yarnpkg.com/@feast-dev/feast-ui/-/feast-ui-0.56.0.tgz#69a08ce552a163ae37e5af5b363459a7615e7907" - integrity sha512-FkmT6oUzuEj/NKZ5+3aBmMVkNZ7CHB+58KW9YS7Gb8BoxjP97ALO8ERV6VONK+xLou2njubinKzzsBZwy5joIA== +"@feast-dev/feast-ui@0.57.0": + version "0.57.0" + resolved "https://registry.yarnpkg.com/@feast-dev/feast-ui/-/feast-ui-0.57.0.tgz#0877b7f7fb39dd5cc74aabd9e77b79ff043d5a0a" + integrity sha512-lNegnVW1HnRT3NB54vaEuaO8pNDU5xNI/tDJNfz8HSXmvJcZ5Xiz1c/go9sRskOClUMrC4xJN/kw9F7IVMsnCw== dependencies: "@elastic/datemath" "^5.0.3" "@elastic/eui" "^95.12.0" diff --git a/sdk/python/pytest.ini b/sdk/python/pytest.ini index d79459c0d0e..767b75a6a29 100644 --- a/sdk/python/pytest.ini +++ b/sdk/python/pytest.ini @@ -10,8 +10,8 @@ env = IS_TEST=True filterwarnings = - error::_pytest.warning_types.PytestConfigWarning - error::_pytest.warning_types.PytestUnhandledCoroutineWarning + error::pytest.PytestConfigWarning + error:.*was never awaited.*:RuntimeWarning ignore::DeprecationWarning:pyspark.sql.pandas.*: ignore::DeprecationWarning:pyspark.sql.connect.*: ignore::DeprecationWarning:httpx.*: diff --git a/sdk/python/tests/conftest.py b/sdk/python/tests/conftest.py index a57846c7e2e..267dd074ffd 100644 --- a/sdk/python/tests/conftest.py +++ b/sdk/python/tests/conftest.py @@ -36,34 +36,60 @@ create_document_dataset, create_image_dataset, ) -from tests.integration.feature_repos.integration_test_repo_config import ( # noqa: E402 - IntegrationTestRepoConfig, -) -from tests.integration.feature_repos.repo_configuration import ( # noqa: E402 - AVAILABLE_OFFLINE_STORES, - AVAILABLE_ONLINE_STORES, - OFFLINE_STORE_TO_PROVIDER_CONFIG, - Environment, - TestData, - construct_test_environment, - construct_universal_feature_views, - construct_universal_test_data, -) -from tests.integration.feature_repos.universal.data_sources.file import ( # noqa: E402 - FileDataSourceCreator, -) -from tests.integration.feature_repos.universal.entities import ( # noqa: E402 - customer, - driver, - 
-    location,
-)
-from tests.utils.auth_permissions_util import default_store
 from tests.utils.http_server import check_port_open, free_port  # noqa: E402
-from tests.utils.ssl_certifcates_util import (
-    combine_trust_stores,
-    create_ca_trust_store,
-    generate_self_signed_cert,
-)
+
+try:
+    from tests.integration.feature_repos.integration_test_repo_config import (  # noqa: E402
+        IntegrationTestRepoConfig,
+    )
+    from tests.integration.feature_repos.repo_configuration import (  # noqa: E402
+        AVAILABLE_OFFLINE_STORES,
+        AVAILABLE_ONLINE_STORES,
+        OFFLINE_STORE_TO_PROVIDER_CONFIG,
+        Environment,
+        TestData,
+        construct_test_environment,
+        construct_universal_feature_views,
+        construct_universal_test_data,
+    )
+    from tests.integration.feature_repos.universal.data_sources.file import (  # noqa: E402
+        FileDataSourceCreator,
+    )
+    from tests.integration.feature_repos.universal.entities import (  # noqa: E402
+        customer,
+        driver,
+        location,
+    )
+except ModuleNotFoundError:
+    IntegrationTestRepoConfig = None  # type: ignore[assignment]
+    Environment = None  # type: ignore[assignment]
+    TestData = None  # type: ignore[assignment]
+    AVAILABLE_OFFLINE_STORES = None  # type: ignore[assignment]
+    AVAILABLE_ONLINE_STORES = None  # type: ignore[assignment]
+    OFFLINE_STORE_TO_PROVIDER_CONFIG = None  # type: ignore[assignment]
+    construct_test_environment = None  # type: ignore[assignment]
+    construct_universal_feature_views = None  # type: ignore[assignment]
+    construct_universal_test_data = None  # type: ignore[assignment]
+    FileDataSourceCreator = None  # type: ignore[assignment]
+    customer = None  # type: ignore[assignment]
+    driver = None  # type: ignore[assignment]
+    location = None  # type: ignore[assignment]
+
+try:
+    from tests.utils.auth_permissions_util import default_store
+except ModuleNotFoundError:
+    default_store = None  # type: ignore[assignment]
+
+try:
+    from tests.utils.ssl_certifcates_util import (
+        combine_trust_stores,
+        create_ca_trust_store,
+        generate_self_signed_cert,
+    )
+except ModuleNotFoundError:
+    combine_trust_stores = None  # type: ignore[assignment]
+    create_ca_trust_store = None  # type: ignore[assignment]
+    generate_self_signed_cert = None  # type: ignore[assignment]

 logger = logging.getLogger(__name__)

@@ -85,7 +111,7 @@ def pytest_configure(config):
-    if platform in ["darwin", "windows"]:
+    if platform == "darwin" or platform.startswith("win"):
         multiprocessing.set_start_method("spawn", force=True)
     else:
         multiprocessing.set_start_method("fork")
@@ -239,10 +265,10 @@ def pytest_generate_tests(metafunc: pytest.Metafunc):
     See more examples at https://docs.pytest.org/en/6.2.x/example/parametrize.html#paramexamples

     We also utilize indirect parametrization here. Since `environment` is a fixture,
     when we call metafunc.parametrize("environment", ..., indirect=True)
     we actually parametrizing this "environment" fixture and not the test itself.
     Moreover, by utilizing `_config_cache` we are able to share `environment` fixture between different tests.
     In order for pytest to group tests together (and share environment fixture)
     parameter should point to the same Python object (hence, we use _config_cache dict to store those objects).
     """
diff --git a/sdk/python/tests/unit/infra/offline_stores/test_clickhouse.py b/sdk/python/tests/unit/infra/offline_stores/test_clickhouse.py
index 38c632a59a7..f5440ed367d 100644
--- a/sdk/python/tests/unit/infra/offline_stores/test_clickhouse.py
+++ b/sdk/python/tests/unit/infra/offline_stores/test_clickhouse.py
@@ -1,3 +1,4 @@
+import logging
 import threading
 from unittest.mock import MagicMock, patch

@@ -6,6 +7,8 @@
 from feast.infra.utils.clickhouse.clickhouse_config import ClickhouseConfig
 from feast.infra.utils.clickhouse.connection_utils import get_client, thread_local

+logger = logging.getLogger(__name__)
+

 @pytest.fixture
 def clickhouse_config():
@@ -76,3 +79,57 @@ def thread_2_work():
     assert client_1a is not client_2, (
         "Different threads should get different client instances (not cached)"
     )
+
+
+def test_clickhouse_config_parses_additional_client_args():
+    """
+    Test that ClickhouseConfig correctly parses additional_client_args from a dict,
+    simulating how it would be parsed from YAML by Pydantic.
+    """
+    # This simulates the dict that would come from yaml.safe_load()
+    raw_config = {
+        "host": "localhost",
+        "port": 8123,
+        "database": "default",
+        "user": "default",
+        "password": "password",
+        "additional_client_args": {
+            "send_receive_timeout": 60,
+            "compress": True,
+            "client_name": "feast_test",
+        },
+    }
+
+    # Pydantic should parse this dict into a ClickhouseConfig object
+    config = ClickhouseConfig(**raw_config)
+
+    # Verify all fields are correctly parsed
+    assert config.host == "localhost"
+    assert config.port == 8123
+    assert config.database == "default"
+    assert config.user == "default"
+    assert config.password == "password"
+
+    # Verify additional_client_args is correctly parsed as a dict
+    assert config.additional_client_args is not None
+    assert isinstance(config.additional_client_args, dict)
+    assert config.additional_client_args["send_receive_timeout"] == 60
+    assert config.additional_client_args["compress"] is True
+    assert config.additional_client_args["client_name"] == "feast_test"
+
+
+def test_clickhouse_config_handles_none_additional_client_args():
+    """
+    Test that ClickhouseConfig correctly handles when additional_client_args is not provided.
+    """
+    raw_config = {
+        "host": "localhost",
+        "port": 8123,
+        "database": "default",
+        "user": "default",
+        "password": "password",
+    }
+
+    config = ClickhouseConfig(**raw_config)
+
+    assert config.additional_client_args is None
diff --git a/sdk/python/tests/unit/infra/scaffolding/test_repo_operations.py b/sdk/python/tests/unit/infra/scaffolding/test_repo_operations.py
index 2f274d539ff..672c8f43d96 100644
--- a/sdk/python/tests/unit/infra/scaffolding/test_repo_operations.py
+++ b/sdk/python/tests/unit/infra/scaffolding/test_repo_operations.py
@@ -180,10 +180,10 @@ def test_parse_repo_with_future_annotations():
         repo_path = Path(temp_path / "my_project" / "feature_repo")
         assert result.returncode == 0

-        with open(repo_path / "example_repo.py", "r") as f:
+        with open(repo_path / "feature_definitions.py", "r") as f:
             existing_content = f.read()

-        with open(repo_path / "example_repo.py", "w") as f:
+        with open(repo_path / "feature_definitions.py", "w") as f:
             f.write("from __future__ import annotations" + "\n" + existing_content)

         repo_contents = parse_repo(repo_path)
diff --git a/sdk/python/tests/unit/local_feast_tests/test_init.py b/sdk/python/tests/unit/local_feast_tests/test_init.py
index 4543a239796..1f3c0993845 100644
--- a/sdk/python/tests/unit/local_feast_tests/test_init.py
+++ b/sdk/python/tests/unit/local_feast_tests/test_init.py
@@ -67,3 +67,17 @@ def test_repo_init_with_underscore_in_project_name() -> None:
     )
     result = runner.run(["apply"], cwd=repo_dir)
     assert result.returncode != 0
+
+
+def test_postgres_template_registry_path_is_parameterized() -> None:
+    template_fs_yaml = (
+        Path(__file__).resolve().parents[3]
+        / "feast"
+        / "templates"
+        / "postgres"
+        / "feature_repo"
+        / "feature_store.yaml"
+    )
+    contents = template_fs_yaml.read_text(encoding="utf-8")
+    expected = "path: postgresql://DB_USERNAME:DB_PASSWORD@DB_HOST:DB_PORT/DB_NAME"
+    assert expected in contents
diff --git a/sdk/python/tests/unit/test_feature_server.py b/sdk/python/tests/unit/test_feature_server.py
index 21c01d61765..e3fd0387fb9 100644
--- a/sdk/python/tests/unit/test_feature_server.py
+++ b/sdk/python/tests/unit/test_feature_server.py
@@ -207,3 +207,204 @@ def test_materialize_request_model():
     assert req2.disable_event_timestamp is False
     assert req2.start_ts == "2021-01-01T00:00:00"
     assert req2.end_ts == "2021-01-02T00:00:00"
+
+
+# Static Artifacts Tests
+@pytest.fixture
+def mock_store_with_static_artifacts(tmp_path):
+    """Create a mock store with static_artifacts.py file for testing."""
+    # Create static_artifacts.py file
+    static_artifacts_content = '''
+from fastapi import FastAPI
+from fastapi.logger import logger
+
+def load_test_model():
+    """Mock model loading for testing."""
+    logger.info("Loading test model...")
+    return "test_model_loaded"
+
+def load_test_lookup_tables():
+    """Mock lookup tables for testing."""
+    return {"test_label": "test_value"}
+
+def load_artifacts(app: FastAPI):
+    """Load test static artifacts."""
+    app.state.test_model = load_test_model()
+    app.state.test_lookup_tables = load_test_lookup_tables()
+    logger.info("āœ… Test static artifacts loaded")
+'''
+
+    # Write static_artifacts.py to temp directory
+    artifacts_file = tmp_path / "static_artifacts.py"
+    artifacts_file.write_text(static_artifacts_content)
+
+    # Create mock store
+    mock_store = MagicMock()
+    mock_store.repo_path = str(tmp_path)
+    return mock_store
+
+
+def test_load_static_artifacts_success(mock_store_with_static_artifacts):
+    """Test successful loading of static artifacts during server startup."""
+    import asyncio
+
+    from fastapi import FastAPI
+
+    from feast.feature_server import load_static_artifacts
+
+    app = FastAPI()
+
+    # Load static artifacts
+    asyncio.run(load_static_artifacts(app, mock_store_with_static_artifacts))
+
+    # Verify artifacts were loaded into app.state
+    assert hasattr(app.state, "test_model")
+    assert hasattr(app.state, "test_lookup_tables")
+    assert app.state.test_model == "test_model_loaded"
+    assert app.state.test_lookup_tables == {"test_label": "test_value"}
+
+
+def test_load_static_artifacts_no_file(tmp_path):
+    """Test graceful handling when static_artifacts.py doesn't exist."""
+    import asyncio
+
+    from fastapi import FastAPI
+
+    from feast.feature_server import load_static_artifacts
+
+    app = FastAPI()
+    mock_store = MagicMock()
+    mock_store.repo_path = str(tmp_path)  # Empty directory
+
+    # Should not raise an exception
+    asyncio.run(load_static_artifacts(app, mock_store))
+
+    # Should not have added test artifacts
+    assert not hasattr(app.state, "test_model")
+    assert not hasattr(app.state, "test_lookup_tables")
+
+
+def test_load_static_artifacts_invalid_file(tmp_path):
+    """Test graceful handling when static_artifacts.py has errors."""
+    import asyncio
+
+    from fastapi import FastAPI
+
+    from feast.feature_server import load_static_artifacts
+
+    # Create invalid static_artifacts.py
+    artifacts_file = tmp_path / "static_artifacts.py"
+    artifacts_file.write_text("raise ValueError('Test error')")
+
+    app = FastAPI()
+    mock_store = MagicMock()
+    mock_store.repo_path = str(tmp_path)
+
+    # Should handle the error gracefully
+    asyncio.run(load_static_artifacts(app, mock_store))
+
+    # Should not have artifacts due to error
+    assert not hasattr(app.state, "test_model")
+
+
+def test_load_static_artifacts_no_load_function(tmp_path):
+    """Test handling when static_artifacts.py has no load_artifacts function."""
+    import asyncio
+
+    from fastapi import FastAPI
+
+    from feast.feature_server import load_static_artifacts
+
+    # Create static_artifacts.py without load_artifacts function
+    artifacts_file = tmp_path / "static_artifacts.py"
+    artifacts_file.write_text("TEST_CONSTANT = 'test'")
+
+    app = FastAPI()
+    mock_store = MagicMock()
+    mock_store.repo_path = str(tmp_path)
+
+    # Should handle gracefully
+    asyncio.run(load_static_artifacts(app, mock_store))
+
+    # Should not have artifacts since no load_artifacts function
+    assert not hasattr(app.state, "test_model")
+
+
+def test_static_artifacts_persist_across_requests(mock_store_with_static_artifacts):
+    """Test that static artifacts persist across multiple requests."""
+    from feast.feature_server import get_app
+
+    # Create app with static artifacts
+    app = get_app(mock_store_with_static_artifacts)
+
+    # Simulate artifacts being loaded (normally done in lifespan)
+    app.state.test_model = "persistent_model"
+    app.state.test_lookup_tables = {"persistent": "data"}
+
+    # Artifacts should be available and persistent
+    assert app.state.test_model == "persistent_model"
+    assert app.state.test_lookup_tables["persistent"] == "data"
+
+    # After simulated requests, artifacts should still be there
+    assert app.state.test_model == "persistent_model"
+    assert app.state.test_lookup_tables["persistent"] == "data"
+
+
+def test_pytorch_nlp_template_artifacts_pattern(tmp_path):
+    """Test the specific PyTorch NLP template static artifacts pattern."""
+    import asyncio
+
+    from fastapi import FastAPI
+
+    from feast.feature_server import load_static_artifacts
+
+    # Create PyTorch NLP template-style static_artifacts.py
+    pytorch_artifacts_content = '''
+from fastapi import FastAPI
+from fastapi.logger import logger
+
+def load_sentiment_model():
+    """Mock sentiment analysis model loading."""
+    logger.info("Loading sentiment analysis model...")
+    return "mock_roberta_sentiment_model"
+
+def load_lookup_tables():
+    """Load lookup tables for sentiment mapping."""
+    return {
+        "sentiment_labels": {"LABEL_0": "negative", "LABEL_1": "neutral", "LABEL_2": "positive"},
+        "emoji_sentiment": {"😊": "positive", "šŸ˜ž": "negative", "😐": "neutral"},
+    }
+
+def load_artifacts(app: FastAPI):
+    """Load all static artifacts for PyTorch NLP template."""
+    app.state.sentiment_model = load_sentiment_model()
+    app.state.lookup_tables = load_lookup_tables()
+
+    # Update global references (simulating example_repo.py pattern)
+    # In real template, this would be: import example_repo; example_repo._sentiment_model = ...
+    logger.info("āœ… PyTorch NLP static artifacts loaded successfully")
+'''
+
+    artifacts_file = tmp_path / "static_artifacts.py"
+    artifacts_file.write_text(pytorch_artifacts_content)
+
+    # Test loading
+    app = FastAPI()
+    mock_store = MagicMock()
+    mock_store.repo_path = str(tmp_path)
+
+    asyncio.run(load_static_artifacts(app, mock_store))
+
+    # Verify PyTorch NLP template artifacts
+    assert hasattr(app.state, "sentiment_model")
+    assert hasattr(app.state, "lookup_tables")
+    assert app.state.sentiment_model == "mock_roberta_sentiment_model"
+
+    # Verify lookup tables structure matches template
+    lookup_tables = app.state.lookup_tables
+    assert "sentiment_labels" in lookup_tables
+    assert "emoji_sentiment" in lookup_tables
+    assert lookup_tables["sentiment_labels"]["LABEL_0"] == "negative"
+    assert lookup_tables["sentiment_labels"]["LABEL_1"] == "neutral"
+    assert lookup_tables["sentiment_labels"]["LABEL_2"] == "positive"
+    assert lookup_tables["emoji_sentiment"]["😊"] == "positive"
diff --git a/ui/package.json b/ui/package.json
index dd9f240810b..78e5c4839f5 100644
--- a/ui/package.json
+++ b/ui/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@feast-dev/feast-ui",
-  "version": "0.57.0",
+  "version": "0.58.0",
   "private": false,
   "files": [
     "dist"