remove unnecessary comments
Signed-off-by: tokoko <togurg14@freeuni.edu.ge>
tokoko committed May 3, 2024
commit 9483192399ba926218868e6895290c7f607b6bc7
@@ -42,43 +42,4 @@ def test_universal_materialization_consistency(environment):
     # we use timestamp from generated dataframe as a split point
     split_dt = df["ts_1"][4].to_pydatetime() - timedelta(seconds=1)

-    print(f"Split datetime: {split_dt}")
-
     validate_offline_online_store_consistency(fs, driver_stats_fv, split_dt)
-
-
-# @pytest.mark.integration
-# def test_spark_materialization_consistency():
-#     spark_config = IntegrationTestRepoConfig(
-#         provider="local",
-#         online_store_creator=RedisOnlineStoreCreator,
-#         offline_store_creator=SparkDataSourceCreator,
-#         batch_engine={"type": "spark.engine", "partitions": 10},
-#     )
-#     spark_environment = construct_test_environment(
-#         spark_config, None, entity_key_serialization_version=1
-#     )
-
-
-#     driver_stats_fv = FeatureView(
-#         name="driver_hourly_stats",
-#         entities=[driver],
-#         ttl=timedelta(weeks=52),
-#         schema=[Field(name="value", dtype=Float32)],
-#         source=ds,
-#     )
-
-#     # try:
-#     fs.apply([driver, driver_stats_fv])
-
-#     # print(df)
-
-#     # # materialization is run in two steps and
-#     # # we use timestamp from generated dataframe as a split point
-#     # split_dt = df["ts_1"][4].to_pydatetime() - timedelta(seconds=1)
-
-#     # print(f"Split datetime: {split_dt}")
-
-#     # validate_offline_online_store_consistency(fs, driver_stats_fv, split_dt)
-#     # finally:
-#     # fs.teardown()
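
For readers following along, here is a minimal sketch of what the commented-out Spark test removed above could look like once cleaned up. The names taken from the removed block (IntegrationTestRepoConfig, construct_test_environment, SparkDataSourceCreator, RedisOnlineStoreCreator, validate_offline_online_store_consistency, and the spark.engine batch engine config) appear in the diff as shown; the import paths, create_basic_driver_dataset, the data_source_creator call, and the environment's feature_store attribute are assumptions modeled on Feast's test utilities and are not confirmed by this commit.

from datetime import timedelta

import pytest

from feast import Entity, FeatureView, Field
from feast.types import Float32

# Hypothetical import paths, modeled on Feast's test tree; the real locations
# of these helpers may differ:
# from tests.data.data_creator import create_basic_driver_dataset
# from tests.integration.feature_repos.integration_test_repo_config import IntegrationTestRepoConfig
# from tests.integration.feature_repos.repo_configuration import construct_test_environment
# from tests.integration.feature_repos.universal.data_sources.spark_data_source_creator import SparkDataSourceCreator
# from tests.integration.feature_repos.universal.online_store.redis import RedisOnlineStoreCreator
# from tests.utils.e2e_test_validation import validate_offline_online_store_consistency


@pytest.mark.integration
def test_spark_materialization_consistency():
    # Spark offline store + Redis online store, materialized by the Spark engine
    spark_config = IntegrationTestRepoConfig(
        provider="local",
        online_store_creator=RedisOnlineStoreCreator,
        offline_store_creator=SparkDataSourceCreator,
        batch_engine={"type": "spark.engine", "partitions": 10},
    )
    spark_environment = construct_test_environment(
        spark_config, None, entity_key_serialization_version=1
    )
    fs = spark_environment.feature_store  # assumed attribute on the test environment

    # Assumed setup: generate a driver dataframe and register it as a Spark source
    df = create_basic_driver_dataset()
    ds = spark_environment.data_source_creator.create_data_source(
        df, destination_name="driver_hourly"
    )
    driver = Entity(name="driver", join_keys=["driver_id"])

    driver_stats_fv = FeatureView(
        name="driver_hourly_stats",
        entities=[driver],
        ttl=timedelta(weeks=52),
        schema=[Field(name="value", dtype=Float32)],
        source=ds,
    )

    try:
        fs.apply([driver, driver_stats_fv])
        # materialization is run in two steps; a timestamp from the generated
        # dataframe serves as the split point, as in the universal test above
        split_dt = df["ts_1"][4].to_pydatetime() - timedelta(seconds=1)
        validate_offline_online_store_consistency(fs, driver_stats_fv, split_dt)
    finally:
        fs.teardown()

The try/finally wrapper follows the removed # try: / # finally: hints, so that fs.teardown() cleans up the online store and registry even when the consistency check fails.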