diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
index fdf005901..1f011157e 100644
--- a/.github/FUNDING.yml
+++ b/.github/FUNDING.yml
@@ -9,4 +9,4 @@ community_bridge: # Replace with a single Community Bridge project-name e.g., cl
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
-custom: ['https://bit.ly/2op1mu5']# Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
+custom: []# Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
diff --git a/.github/ISSUE_TEMPLATE/blank_issue.yml b/.github/ISSUE_TEMPLATE/blank_issue.yml
new file mode 100644
index 000000000..bbd855958
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/blank_issue.yml
@@ -0,0 +1,12 @@
+name: Blank Issue
+description: Submit an issue about Tensorflow.NET.
+labels: [Blank Issue]
+body:
+ - type: textarea
+ id: description
+ attributes:
+ label: Description
+ description: Please describe the issue here.
+ placeholder: Description
+ validations:
+ required: false
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 000000000..14e237951
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,48 @@
+name: BUG Report
+description: Report a BUG of Tensorflow.NET.
+title: "[BUG Report]: "
+labels: [bug-report]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ We welcome bug reports! Any unexpected behavior could be a BUG and this template helps us gather the information to fix it.
+ - type: textarea
+ id: background
+ attributes:
+ label: Description
+ description: Please share a clear and concise description of the problem.
+ placeholder: Description
+ validations:
+ required: true
+ - type: textarea
+ id: repro-steps
+ attributes:
+ label: Reproduction Steps
+ description: |
+ Please include minimal steps to reproduce the problem if possible. E.g.: the smallest possible code snippet; or a small project, with steps to run it. It will greatly help us to locate the reason of the problem.
+ placeholder: Minimal Reproduction
+ validations:
+ required: false
+ - type: textarea
+ id: known-workarounds
+ attributes:
+ label: Known Workarounds
+ description: |
+ Please provide a description of any known workarounds.
+ placeholder: Known Workarounds
+ validations:
+ required: false
+ - type: textarea
+ id: configuration
+ attributes:
+ label: Configuration and Other Information
+ description: |
+ Please provide more information on your configuration:
+ * Which version of Tensorflow.NET is the code depending on?
+ * Which version of .NET runtime is the code running on?
+ * What is the OS?
+ * Any other information about this problem?
+ placeholder: Configuration
+ validations:
+ required: false
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/documention_issue.yml b/.github/ISSUE_TEMPLATE/documention_issue.yml
new file mode 100644
index 000000000..f8a04e40f
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/documention_issue.yml
@@ -0,0 +1,30 @@
+name: Documentation Issue
+description: Report an issue about the Tensorflow.NET documentation or request new documentation.
+title: "[Documentation Issue]: "
+labels: [Documentation Issue]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Suggestions for the Tensorflow.NET documentation are welcome! This template will help us gather the information we need to improve it.
+ - type: textarea
+ id: brief-description
+ attributes:
+ label: Brief Description
+ description: Please describe the problem or the request for new documentation here.
+ placeholder: Description
+ validations:
+ required: true
+ - type: textarea
+ id: alternatives
+ attributes:
+ label: Alternatives
+ description: |
+ Please provide some alternative information here, if any.
+ placeholder: Alternatives
+ validations:
+ required: false
+ - type: markdown
+ attributes:
+ value: |
+ Thanks for your contribution!
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 000000000..9ce3f1663
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,50 @@
+name: Feature Request
+description: Request/Propose a new feature of Tensorflow.NET.
+title: "[Feature Request]: "
+labels: [feature-request]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ We welcome feature proposal/request! This template will help us gather the information we need to implement the new feature.
+ - type: textarea
+ id: background
+ attributes:
+ label: Background and Feature Description
+ description: Please describe the purpose and value of the new feature here. If the feature is linked to a specific problem, please describe it or put the link here.
+ placeholder: Purpose
+ validations:
+ required: true
+ - type: textarea
+ id: api-proposal
+ attributes:
+ label: API Definition and Usage
+ description: |
+ Please tell us the new API related to the requested feature, if any.
+ placeholder: API declaration (no method bodies)
+ value: |
+ ```cs
+ public Tensor NewFunc(Tensor x, int y);
+
+ var result = NewFunc(input, index);
+ ```
+ validations:
+ required: false
+ - type: textarea
+ id: alternatives
+ attributes:
+ label: Alternatives
+ description: |
+ Please provide some alternative information of the feature, if any. For example, if you request a feature which depends on a specific device, please provide the device information.
+ placeholder: Alternatives
+ validations:
+ required: false
+ - type: textarea
+ id: risks
+ attributes:
+ label: Risks
+ description: |
+ Please mention any risks that to your knowledge the API proposal might entail, such as breaking changes, performance regressions, etc.
+ placeholder: Risks
+ validations:
+ required: false
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/performance_issue.yml b/.github/ISSUE_TEMPLATE/performance_issue.yml
new file mode 100644
index 000000000..cbe86d329
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/performance_issue.yml
@@ -0,0 +1,48 @@
+name: Performance Issue
+description: Submit an issue about performance problem or regression of Tensorflow.NET.
+title: "[Performance Issue]: "
+labels: [Performance Issue]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ We welcome issues about Tensorflow.NET performance! This template will help us gather the information we need to locate the problem and improve the performance.
+ - type: textarea
+ id: brief-description
+ attributes:
+ label: Brief Description
+ description: Please give a brief description about the performance issue here.
+ placeholder: Description
+ validations:
+ required: true
+ - type: textarea
+ id: device-and-context
+ attributes:
+ label: Device and Context
+ description: |
+ Please describe the device and context you used when you encounter the performance problem/regression.
+ placeholder: Device and Context
+ validations:
+ required: true
+ - type: textarea
+ id: benchmark
+ attributes:
+ label: Benchmark
+ description: |
+ We will appreciate it if you'd like to provide benchmark comparison of the performance issue.
+ placeholder: Benchmark
+ validations:
+ required: false
+ - type: textarea
+ id: alternatives
+ attributes:
+ label: Alternatives
+ description: |
+ Please provide some alternative information about the performance issue here, if any. For example, we'll appreciate it if you'd like to provide the code to reproduce the performance problem.
+ placeholder: Alternatives
+ validations:
+ required: false
+ - type: markdown
+ attributes:
+ value: |
+ Thanks for your contribution!
diff --git a/.github/ISSUE_TEMPLATE/question.yml b/.github/ISSUE_TEMPLATE/question.yml
new file mode 100644
index 000000000..ca38be340
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/question.yml
@@ -0,0 +1,30 @@
+name: Question
+description: Ask any question about Tensorflow.NET and discuss with community members.
+title: "[Question]: "
+labels: [Question]
+body:
+ - type: markdown
+ attributes:
+ value: |
+ Any question about Tensorflow.NET is welcome! This template will help us understand your question.
+ - type: textarea
+ id: description
+ attributes:
+ label: Description
+ description: Please describe your question here.
+ placeholder: Description
+ validations:
+ required: true
+ - type: textarea
+ id: alternatives
+ attributes:
+ label: Alternatives
+ description: |
+ Please provide some alternative information here, if any.
+ placeholder: Alternatives
+ validations:
+ required: false
+ - type: markdown
+ attributes:
+ value: |
+ We are always willing to answer your questions!
diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml
new file mode 100644
index 000000000..9fd34fc49
--- /dev/null
+++ b/.github/workflows/build_and_test.yml
@@ -0,0 +1,66 @@
+# This workflow will build a .NET project
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-net
+
+name: build_and_test
+
+on:
+ push:
+ branches: [ "master" ]
+ pull_request:
+ branches: [ "master" ]
+ types: ["opened", "reopened", "synchronize", "ready_for_review", "auto_merge_enabled"]
+
+jobs:
+ windows:
+
+ runs-on: windows-latest
+
+ steps:
+ - uses: actions/checkout@v3
+ - name: Setup .NET 6
+ uses: actions/setup-dotnet@v3
+ with:
+ dotnet-version: 6.0.x
+ - name: Restore dependencies
+ run: dotnet restore
+ - name: Build CPU version
+ run: dotnet build --no-restore
+ - name: Test CPU version
+ run: dotnet test --no-build --verbosity normal
+ - name: uninstall redist cpu for unit tests
+ run: dotnet remove tools/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist
+ - name: install redist gpu for unit tests
+ run: dotnet add tools/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist-Windows-GPU
+ - name: Restore dependencies
+ run: dotnet restore
+ - name: Build GPU version
+ run: dotnet build --no-restore
+# - name: Test GPU version
+# run: dotnet test --no-build --verbosity normal
+
+ linux:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+ - name: Setup .NET
+ uses: actions/setup-dotnet@v3
+ with:
+ dotnet-version: 6.0.x
+ - name: Restore dependencies
+ run: dotnet restore
+ - name: Build CPU version
+ run: dotnet build --no-restore
+ - name: Test CPU version
+ run: dotnet test --no-build --verbosity normal
+ - name: uninstall redist cpu for unit tests
+ run: dotnet remove tools/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist
+ - name: install redist gpu for unit tests
+ run: dotnet add tools/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist-Linux-GPU
+ - name: Restore dependencies
+ run: dotnet restore
+ - name: Build GPU version
+ run: dotnet build --no-restore
+# - name: Test GPU version
+# run: dotnet test --no-build --verbosity normal
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 000000000..02601764c
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,62 @@
+name: auto-release
+
+on:
+ workflow_run:
+ workflows: ["release-prepare"]
+ types:
+ - completed
+
+env:
+ MYGET_API_TOKEN: ${{ SECRETS.MYGET_API_KEY }}
+ GITHUB_TOKEN: ${{ SECRETS.RINNE_GITHUB_TOKEN }}
+
+jobs:
+ release_to_myget:
+ runs-on: windows-latest
+# needs: run-semantic-release
+
+ steps:
+ - uses: actions/checkout@v3
+ - name: Setup .NET 6.0.x SDK
+ uses: actions/setup-dotnet@v3
+ with:
+ dotnet-version: 6.0.x
+
+ - name: Check .NET info
+ run: dotnet --info
+
+ - name: Install dependencies
+ run: dotnet restore
+
+ - name: Build solution
+ run: dotnet build -c Release --no-restore
+
+ - name: Pack packages
+ run: |
+ git fetch --unshallow;
+ git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*";
+ git fetch origin;
+ $LastTag = git describe --tags;
+ $DroppedTag = ($LastTag).TrimStart('v');
+ echo "Last tag is: $DroppedTag";
+ $Suffix = "-nightly"
+ $Version = "${DroppedTag}${Suffix}";
+ echo "Publishing version: $Version";
+ dotnet pack ./src/TensorFlowNET.Core/Tensorflow.Binding.csproj -c Release -o packages /p:PackageVersion=$Version /p:Version=$Version;
+ dotnet pack ./src/TensorFlowNET.Keras/Tensorflow.Keras.csproj -c Release -o packages /p:PackageVersion=$Version /p:Version=$Version;
+ dotnet pack ./src/TensorflowNET.Hub/Tensorflow.Hub.csproj -c Release -o packages /p:PackageVersion=$Version /p:Version=$Version;
+
+ if($LastExitCode -ne 0)
+ {
+ Write-Warning -Message "Pack packages warning, last exit code is ${LastExitCode}."
+ $LastExitCode = 0;
+ }
+
+ - name: Upload packages artifacts
+ uses: actions/upload-artifact@v4.0.0
+ with:
+ name: "drop-ci-packages"
+ path: './packages'
+
+ - name: Push TensorFlow.NET to myget.org
+ run: dotnet nuget push .\packages\TensorFlow*.nupkg --source https://www.myget.org/F/scisharp/api/v3/index.json -k ${{ secrets.MYGET_API_KEY }} --skip-duplicate
diff --git a/.github/workflows/release_prepare.yml b/.github/workflows/release_prepare.yml
new file mode 100644
index 000000000..b21c6665c
--- /dev/null
+++ b/.github/workflows/release_prepare.yml
@@ -0,0 +1,46 @@
+name: release-prepare
+
+on:
+ pull_request:
+ branches:
+ - master
+ types: [ closed ]
+
+env:
+ MYGET_API_TOKEN: ${{ SECRETS.MYGET_API_KEY }}
+ GITHUB_TOKEN: ${{ SECRETS.RINNE_GITHUB_TOKEN }}
+
+jobs:
+ build:
+ if: contains(github.event.pull_request.labels.*.name, 'auto-release')
+ runs-on: windows-latest
+
+ steps:
+ - uses: actions/checkout@v3
+ - name: Setup .NET 6.0.x SDK
+ uses: actions/setup-dotnet@v3
+ with:
+ dotnet-version: 6.0.x
+
+ - name: Check .NET info
+ run: dotnet --info
+
+ - name: Install dependencies
+ run: dotnet restore
+
+ - name: Build solution
+ run: dotnet build -c Release --no-restore
+
+# run-semantic-release:
+# runs-on: ubuntu-latest
+# needs: build
+
+# steps:
+# - name: Checkout
+# uses: actions/checkout@v2
+
+# - name: Run semantic-release
+# run: |
+# export PATH=$PATH:$(yarn global bin)
+# yarn global add semantic-release@17.4.3
+# semantic-release
\ No newline at end of file
diff --git a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml
new file mode 100644
index 000000000..db8c06a3e
--- /dev/null
+++ b/.github/workflows/semantic.yml
@@ -0,0 +1,17 @@
+name: Semantic
+
+on:
+ pull_request:
+ branches: [ "master" ]
+
+jobs:
+ semantic-pull-request:
+ name: Semantic check
+ runs-on: windows-latest
+ steps:
+ - name: semantic-pull-request
+ uses: amannn/action-semantic-pull-request@v4
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ validateSingleCommit: true
diff --git a/.gitignore b/.gitignore
index 261c681a3..231d8379a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -337,3 +337,5 @@ test/TensorFlowNET.Examples/mnist
# training model resources
.resources
/redist
+*.xml
+*.xsd
diff --git a/Directory.Build.props b/Directory.Build.props
new file mode 100644
index 000000000..065690ec9
--- /dev/null
+++ b/Directory.Build.props
@@ -0,0 +1,17 @@
+
+
+
+
+
+ true
+ $(NoWarn),1573,1591,1712
+
+
+
diff --git a/Directory.Build.targets b/Directory.Build.targets
new file mode 100644
index 000000000..341027f3c
--- /dev/null
+++ b/Directory.Build.targets
@@ -0,0 +1,3 @@
+
+
+
diff --git a/README.md b/README.md
index 15f72bf58..75cad0aa7 100644
--- a/README.md
+++ b/README.md
@@ -1,141 +1,236 @@

-**TensorFlow.NET** (TF.NET) provides a .NET Standard binding for [TensorFlow](https://www.tensorflow.org/). It aims to implement the complete Tensorflow API in C# which allows .NET developers to develop, train and deploy Machine Learning models with the cross-platform .NET Standard framework.
+**TensorFlow.NET** (TF.NET) provides a .NET Standard binding for [TensorFlow](https://www.tensorflow.org/). It aims to implement the complete Tensorflow API in C# which allows .NET developers to develop, train and deploy Machine Learning models with the cross-platform .NET Standard framework. TensorFlow.NET has built-in Keras high-level interface and is released as an independent package [TensorFlow.Keras](https://www.nuget.org/packages/TensorFlow.Keras/).
+[](https://discord.gg/qRVm82fKTS)
+[](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=sN9VVMwbWjs5L0ATpizKKxOcZdEPMrp8&authKey=RLDw41bLTrEyEgZZi%2FzT4pYk%2BwmEFgFcrhs8ZbkiVY7a4JFckzJefaYNW6Lk4yPX&noverify=0&group_code=985366726)
[](https://gitter.im/sci-sharp/community)
-[](https://ci.appveyor.com/project/Haiping-Chen/tensorflow-net)
-[](https://www.nuget.org/packages/TensorFlow.NET)
+[](https://github.com/SciSharp/TensorFlow.NET/actions/workflows/build_and_test.yml)
[](https://tensorflownet.readthedocs.io/en/latest/?badge=latest)
+[](https://www.nuget.org/packages/TensorFlow.NET)
+[](https://www.nuget.org/packages/TensorFlow.Keras)
+[](https://www.myget.org/feed/scisharp/package/nuget/Tensorflow.NET)
[](https://996.icu/#/en_US)
[](https://mybinder.org/v2/gh/javiercp/BinderTF.NET/master?urlpath=lab)
-*master branch is based on tensorflow 2.2 now, v0.15-tensorflow1.15 is from tensorflow1.15.*
+English | [中文](docs/README-CN.md)
-TF.NET is a member project of [SciSharp STACK](https://github.com/SciSharp).
+> [!IMPORTANT]
+> We're happy that our work on tensorflow.net has attracted many users. However, at this time, none of the main maintainers of this repo is available for new features and bug fix. We won't refuse PRs and will help to review them.
+>
+> If you would like to be a contributor or maintainer of tensorflow.net, we'd like to help you to start up.
+>
+> We feel sorry for that and we'll resume the maintaining for this project once one of us has bandwidth for it.
+>
+
+*master branch and v0.100.x is corresponding to tensorflow v2.10, v0.6x branch is from tensorflow v2.6, v0.15-tensorflow1.15 is from tensorflow1.15. Please add `https://www.myget.org/F/scisharp/api/v3/index.json` to nuget source to use nightly release.*

-### Why TensorFlow.NET ?
+## Why Tensorflow.NET ?
+
+`SciSharp STACK`'s mission is to bring popular data science technology into the .NET world and to provide .NET developers with a powerful Machine Learning tool set without reinventing the wheel. Since the APIs are kept as similar as possible you can immediately adapt any existing TensorFlow code in C# or F# with a zero learning curve. Take a look at a comparison picture and see how comfortably a TensorFlow/Python script translates into a C# program with TensorFlow.NET.
+
+
+
+SciSharp's philosophy allows a large number of machine learning code written in Python to be quickly migrated to .NET, enabling .NET developers to use cutting edge machine learning models and access a vast number of TensorFlow resources which would not be possible without this project.
+
+In comparison to other projects, like for instance [TensorFlowSharp](https://www.nuget.org/packages/TensorFlowSharp/) which only provide TensorFlow's low-level C++ API and can only run models that were built using Python, Tensorflow.NET makes it possible to build the pipeline of training and inference with pure C# and F#. Besides, Tensorflow.NET provides binding of Tensorflow.Keras to make it easy to transfer your code from python to .NET.
-`SciSharp STACK`'s mission is to bring popular data science technology into the .NET world and to provide .NET developers with a powerful Machine Learning tool set without reinventing the wheel. Since the APIs are kept as similar as possible you can immediately adapt any existing Tensorflow code in C# with a zero learning curve. Take a look at a comparison picture and see how comfortably a Tensorflow/Python script translates into a C# program with TensorFlow.NET.
+[ML.NET](https://github.com/dotnet/machinelearning) also takes Tensorflow.NET as one of its backends to train and infer your model, which provides better integration with .NET.
-
+## Documentation
-SciSharp's philosophy allows a large number of machine learning code written in Python to be quickly migrated to .NET, enabling .NET developers to use cutting edge machine learning models and access a vast number of Tensorflow resources which would not be possible without this project.
+Introduction and simple examples: [Tensorflow.NET Documents](https://scisharp.github.io/tensorflow-net-docs)
-In comparison to other projects, like for instance TensorFlowSharp which only provide Tensorflow's low-level C++ API and can only run models that were built using Python, Tensorflow.NET also implements Tensorflow's high level API where all the magic happens. This computation graph building layer is still under active development. Once it is completely implemented you can build new Machine Learning models in C#.
+Detailed documentation: [The Definitive Guide to Tensorflow.NET](https://tensorflownet.readthedocs.io/en/latest/FrontCover.html)
-### How to use
+Examples: [TensorFlow.NET Examples](https://github.com/SciSharp/TensorFlow.NET-Examples)
-| TensorFlow | tf 1.13 | tf 1.14 | tf 1.15 | tf 2.2 |
-| ----------- | ------- | ------- | ------- | ------ |
-| tf.net 0.20 | | | x | x |
-| tf.net 0.15 | | x | x | |
-| tf.net 0.14 | x | x | | |
+Troubleshooting for running examples or installation: [Tensorflow.NET FAQ](tensorflowlib/README.md)
+
+## Usage
+
+### Installation
+
+You can search the package name in NuGet Manager, or use the commands below in package manager console.
+
+The installation contains two parts, the first is the main body:
-Install TF.NET and TensorFlow binary through NuGet.
```sh
-### install tensorflow C# binding
+### Install Tensorflow.NET
PM> Install-Package TensorFlow.NET
-### Install tensorflow binary
-### For CPU version
+### Install Tensorflow.Keras
+PM> Install-Package TensorFlow.Keras
+```
+
+The second part is the computing support part. Only one of the following packages is needed, depending on your device and system.
+
+```
+### CPU version for Windows and Linux
PM> Install-Package SciSharp.TensorFlow.Redist
-### For GPU version (CUDA and cuDNN are required)
+### CPU version for MacOS
+PM> Install-Package SciSharp.TensorFlow.Redist-OSX
+
+### GPU version for Windows (CUDA and cuDNN are required)
PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU
+
+### GPU version for Linux (CUDA and cuDNN are required)
+PM> Install-Package SciSharp.TensorFlow.Redist-Linux-GPU
```
-Import TF.NET in your project.
-```cs
-using static Tensorflow.Binding;
-```
+Two simple examples are given here to introduce the basic usage of Tensorflow.NET. As you can see, it's easy to write C# code just like that in Python.
-Linear Regression:
+### Example - Linear Regression in `Eager` mode
-```c#
-// We can set a fixed init value in order to debug
+```csharp
+using static Tensorflow.Binding;
+using static Tensorflow.KerasApi;
+using Tensorflow;
+using Tensorflow.NumPy;
+
+// Parameters
+var training_steps = 1000;
+var learning_rate = 0.01f;
+var display_step = 100;
+
+// Sample data
+var X = np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
+ 7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f);
+var Y = np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
+ 2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f);
+var n_samples = X.shape[0];
+
+// We can set a fixed init value in order to demo
var W = tf.Variable(-0.06f, name: "weight");
var b = tf.Variable(-0.73f, name: "bias");
+var optimizer = keras.optimizers.SGD(learning_rate);
-// Construct a linear model
-var pred = tf.add(tf.multiply(X, W), b);
+// Run training for the given number of steps.
+foreach (var step in range(1, training_steps + 1))
+{
+ // Run the optimization to update W and b values.
+ // Wrap computation inside a GradientTape for automatic differentiation.
+ using var g = tf.GradientTape();
+ // Linear regression (Wx + b).
+ var pred = W * X + b;
+ // Mean square error.
+ var loss = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples);
+ // should stop recording
+ // Compute gradients.
+ var gradients = g.gradient(loss, (W, b));
+
+ // Update W and b following gradients.
+ optimizer.apply_gradients(zip(gradients, (W, b)));
+
+ if (step % display_step == 0)
+ {
+ pred = W * X + b;
+ loss = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples);
+ print($"step: {step}, loss: {loss.numpy()}, W: {W.numpy()}, b: {b.numpy()}");
+ }
+}
+```
-// Mean squared error
-var cost = tf.reduce_sum(tf.pow(pred - Y, 2.0f)) / (2.0f * n_samples);
+Run this example in [Jupyter Notebook](https://github.com/SciSharp/SciSharpCube).
-// Gradient descent
-// Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
-var optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost);
+### Example - Toy version of `ResNet` in `Keras` functional API
-// Initialize the variables (i.e. assign their default value)
-var init = tf.global_variables_initializer();
+```csharp
+using static Tensorflow.Binding;
+using static Tensorflow.KerasApi;
+using Tensorflow;
+using Tensorflow.NumPy;
+
+var layers = keras.layers;
+// input layer
+var inputs = keras.Input(shape: (32, 32, 3), name: "img");
+// convolutional layer
+var x = layers.Conv2D(32, 3, activation: "relu").Apply(inputs);
+x = layers.Conv2D(64, 3, activation: "relu").Apply(x);
+var block_1_output = layers.MaxPooling2D(3).Apply(x);
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(block_1_output);
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(x);
+var block_2_output = layers.Add().Apply(new Tensors(x, block_1_output));
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(block_2_output);
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(x);
+var block_3_output = layers.Add().Apply(new Tensors(x, block_2_output));
+x = layers.Conv2D(64, 3, activation: "relu").Apply(block_3_output);
+x = layers.GlobalAveragePooling2D().Apply(x);
+x = layers.Dense(256, activation: "relu").Apply(x);
+x = layers.Dropout(0.5f).Apply(x);
+// output layer
+var outputs = layers.Dense(10).Apply(x);
+// build keras model
+var model = keras.Model(inputs, outputs, name: "toy_resnet");
+model.summary();
+// compile keras model in tensorflow static graph
+model.compile(optimizer: keras.optimizers.RMSprop(1e-3f),
+ loss: keras.losses.SparseCategoricalCrossentropy(from_logits: true),
+ metrics: new[] { "acc" });
+// prepare dataset
+var ((x_train, y_train), (x_test, y_test)) = keras.datasets.cifar10.load_data();
+// normalize the input
+x_train = x_train / 255.0f;
+// training
+model.fit(x_train[new Slice(0, 2000)], y_train[new Slice(0, 2000)],
+ batch_size: 64,
+ epochs: 10,
+ validation_split: 0.2f);
+// save the model
+model.save("./toy_resnet_model");
+```
-// Start training
-using(tf.Session())
-{
- // Run the initializer
- sess.run(init);
+The F# example for linear regression is available [here](docs/Example-fsharp.md).
- // Fit all training data
- for (int epoch = 0; epoch < training_epochs; epoch++)
- {
- foreach (var (x, y) in zip(train_X, train_Y))
- sess.run(optimizer, (X, x), (Y, y));
-
- // Display logs per epoch step
- if ((epoch + 1) % display_step == 0)
- {
- var c = sess.run(cost, (X, train_X), (Y, train_Y));
- Console.WriteLine($"Epoch: {epoch + 1} cost={c} " + $"W={sess.run(W)} b={sess.run(b)}");
- }
- }
+More advanced examples can be found in [TensorFlow.NET Examples](https://github.com/SciSharp/TensorFlow.NET-Examples).
- Console.WriteLine("Optimization Finished!");
- var training_cost = sess.run(cost, (X, train_X), (Y, train_Y));
- Console.WriteLine($"Training cost={training_cost} W={sess.run(W)} b={sess.run(b)}");
-
- // Testing example
- var test_X = np.array(6.83f, 4.668f, 8.9f, 7.91f, 5.7f, 8.7f, 3.1f, 2.1f);
- var test_Y = np.array(1.84f, 2.273f, 3.2f, 2.831f, 2.92f, 3.24f, 1.35f, 1.03f);
- Console.WriteLine("Testing... (Mean square loss Comparison)");
- var testing_cost = sess.run(tf.reduce_sum(tf.pow(pred - Y, 2.0f)) / (2.0f * test_X.shape[0]),
- (X, test_X), (Y, test_Y));
- Console.WriteLine($"Testing cost={testing_cost}");
- var diff = Math.Abs((float)training_cost - (float)testing_cost);
- Console.WriteLine($"Absolute mean square loss difference: {diff}");
-
- return diff < 0.01;
-});
-```
+## Version Relationships
-Run this example in [Jupyter Notebook](https://github.com/SciSharp/SciSharpCube).
+| TensorFlow.NET Versions | tensorflow 1.14, cuda 10.0 | tensorflow 1.15, cuda 10.0 | tensorflow 2.3, cuda 10.1 | tensorflow 2.4, cuda 11 | tensorflow 2.7, cuda 11 |tensorflow 2.10, cuda 11 |
+| -------------------------- | ------------- | -------------- | ------------- | ------------- | ------------ | ------------ |
+| tf.net 0.10x, tf.keras 0.10 | | | | | | x |
+| tf.net 0.7x, tf.keras 0.7 | | | | | x | |
+| tf.net 0.4x, tf.keras 0.5 | | | | x | | |
+| tf.net 0.3x, tf.keras 0.4 | | | x | | | |
+| tf.net 0.2x | | x | x | | | |
+| tf.net 0.15 | x | x | | | | |
+| tf.net 0.14 | x | | | | | |
-Read the docs & book [The Definitive Guide to Tensorflow.NET](https://tensorflownet.readthedocs.io/en/latest/FrontCover.html).
-There are many examples reside at [TensorFlow.NET Examples](https://github.com/SciSharp/TensorFlow.NET-Examples).
+```
+tf.net 0.4x -> tf native 2.4
+tf.net 0.6x -> tf native 2.6
+tf.net 0.7x -> tf native 2.7
+tf.net 0.10x -> tf native 2.10
+...
+```
-Troubleshooting of running example or installation, please refer [here](tensorflowlib/README.md).
+## Contribution:
-### Contribute:
+Feel like contributing to one of the hottest projects in the Machine Learning field? Want to know how Tensorflow magically creates the computational graph?
-Feel like contributing to one of the hottest projects in the Machine Learning field? Want to know how Tensorflow magically creates the computational graph? We appreciate every contribution however small. There are tasks for novices to experts alike, if everyone tackles only a small task the sum of contributions will be huge.
+We appreciate every contribution however small! There are tasks for novices to experts alike, if everyone tackles only a small task the sum of contributions will be huge.
You can:
-* Let everyone know about this project
-* Port Tensorflow unit tests from Python to C#
-* Port missing Tensorflow code from Python to C#
-* Port Tensorflow examples to C# and raise issues if you come accross missing parts of the API
-* Debug one of the unit tests that is marked as Ignored to get it to work
-* Debug one of the not yet working examples and get it to work
+- Star Tensorflow.NET or share it with others
+- Tell us about the missing APIs compared to Tensorflow
+- Port Tensorflow unit tests from Python to C# or F#
+- Port Tensorflow examples to C# or F# and raise issues if you come across missing parts of the API or a BUG
+- Debug one of the unit tests that is marked as Ignored to get it to work
+- Debug one of the not yet working examples and get it to work
+- Help us to complete the documentation.
+
-### How to debug unit tests:
+#### How to debug unit tests:
-The best way to find out why a unit test is failing is to single step it in C# and its pendant Python at the same time to see where the flow of execution digresses or where variables exhibit different values. Good Python IDEs like PyCharm let you single step into the tensorflow library code.
+The best way to find out why a unit test is failing is to single step it in C# or F# and its corresponding Python at the same time to see where the flow of execution digresses or where variables exhibit different values. Good Python IDEs like PyCharm let you single step into the tensorflow library code.
-### Git Knowhow for Contributors
+#### Git Knowhow for Contributors
Add SciSharp/TensorFlow.NET as upstream to your local repo ...
```git
@@ -147,17 +242,19 @@ Please make sure you keep your fork up to date by regularly pulling from upstrea
git pull upstream master
```
-### Contact
-
-Feel free to star or raise issue on [Github](https://github.com/SciSharp/TensorFlow.NET).
+### Support
+Buy our book to help make this open source project sustainable: [TensorFlow.NET实战](https://item.jd.com/13441549.html)
+
+
+
+
+
-Follow us on [Medium](https://medium.com/scisharp).
-
-Join our chat on [Gitter](https://gitter.im/sci-sharp/community).
+### Contact
-Scan QR code to join Tencent TIM group:
+Join our chat on [Discord](https://discord.gg/qRVm82fKTS) or [Gitter](https://gitter.im/sci-sharp/community).
-
+Follow us on [Twitter](https://twitter.com/ScisharpStack), [Facebook](https://www.facebook.com/scisharp.stack.9), [Medium](https://medium.com/scisharp), [LinkedIn](https://www.linkedin.com/company/scisharp-stack/).
TensorFlow.NET is a part of [SciSharp STACK](https://scisharp.github.io/SciSharp/)
diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln
index 20563359b..e0c273568 100644
--- a/TensorFlow.NET.sln
+++ b/TensorFlow.NET.sln
@@ -1,114 +1,390 @@
Microsoft Visual Studio Solution File, Format Version 12.00
-# Visual Studio Version 16
-VisualStudioVersion = 16.0.29102.190
+# Visual Studio Version 17
+VisualStudioVersion = 17.4.33213.308
MinimumVisualStudioVersion = 10.0.40219.1
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Binding", "src\TensorFlowNET.Core\Tensorflow.Binding.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}"
EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "src\TensorFlowNet.Benchmarks\Tensorflow.Benchmark.csproj", "{3A6EB896-604F-4E25-B677-B8103BCF3D2E}"
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Binding.UnitTest", "test\TensorFlowNET.UnitTest\Tensorflow.Binding.UnitTest.csproj", "{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}"
EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest", "test\TensorFlowNET.UnitTest\Tensorflow.UnitTest.csproj", "{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}"
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras", "src\TensorFlowNET.Keras\Tensorflow.Keras.csproj", "{49D71826-C03D-4FA7-9BAC-22C1327E65CF}"
EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras", "src\TensorFlowNET.Keras\Tensorflow.Keras.csproj", "{6268B461-486A-460B-9B3C-86493CBBAAF7}"
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Text", "src\TensorFlowNET.Text\Tensorflow.Text.csproj", "{1AB8108D-4FFE-4A16-88E7-328EAF686370}"
EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras.UnitTest", "test\Tensorflow.Keras.UnitTest\Tensorflow.Keras.UnitTest.csproj", "{EB92DD90-6346-41FB-B967-2B33A860AD98}"
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Recommenders", "src\TensorFlowNET.Recommenders\Tensorflow.Recommenders.csproj", "{F17AAECB-960A-4E18-A270-BAD776F0E55B}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Native.UnitTest", "test\TensorFlowNET.Native.UnitTest\Tensorflow.Native.UnitTest.csproj", "{84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras.UnitTest", "test\TensorFlowNET.Keras.UnitTest\Tensorflow.Keras.UnitTest.csproj", "{79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Graph.UnitTest", "test\TensorFlowNET.Graph.UnitTest\TensorFlowNET.Graph.UnitTest.csproj", "{3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Hub", "src\TensorflowNET.Hub\Tensorflow.Hub.csproj", "{9738D16A-CFA0-405C-A7DF-D3D203B0CB18}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Hub.Unittest", "test\TensorflowNET.Hub.Unittest\Tensorflow.Hub.Unittest.csproj", "{7DEA8760-E401-4872-81F3-405F185A13A0}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{01A1787F-A9BE-4221-84E8-6360DD010AB6}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{1B0918B9-65AD-4F34-A287-AF4597B27DBD}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tools", "tools", "{E1A5D2B7-10AF-4876-85C0-7714EF274214}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.CodeGen", "tools\Tensorflow.CodeGen\Tensorflow.CodeGen.csproj", "{3D92142F-EEDB-469B-B03C-4E38728BFE4C}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Redist.NativeLibrarySplitter", "tools\Tensorflow.Redist.NativeLibrarySplitter\Tensorflow.Redist.NativeLibrarySplitter.csproj", "{AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest.RedistHolder", "tools\Tensorflow.UnitTest.RedistHolder\Tensorflow.UnitTest.RedistHolder.csproj", "{D24FCAA5-548C-4251-B226-A1B6535D0845}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "tools\TensorFlowNET.Benchmarks\Tensorflow.Benchmark.csproj", "{C23563DB-FE21-48E7-A411-87A109E4A899}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Console", "tools\TensorFlowNET.Console\Tensorflow.Console.csproj", "{1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlow.Kernel.UnitTest", "test\TensorFlow.Kernel.UnitTest\TensorFlow.Kernel.UnitTest.csproj", "{654A027D-1364-4729-880B-144DFE1FF5BB}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Tensorflow.UnitTest", "test\Tensorflow.UnitTest\Tensorflow.UnitTest.csproj", "{A73DF5A6-866E-4AED-9017-AA2EE86368C4}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Debug|x64 = Debug|x64
- Debug-Minimal|Any CPU = Debug-Minimal|Any CPU
- Debug-Minimal|x64 = Debug-Minimal|x64
- Publish|Any CPU = Publish|Any CPU
- Publish|x64 = Publish|x64
+ Debug|x86 = Debug|x86
+ GPU|Any CPU = GPU|Any CPU
+ GPU|x64 = GPU|x64
+ GPU|x86 = GPU|x86
Release|Any CPU = Release|Any CPU
Release|x64 = Release|x64
+ Release|x86 = Release|x86
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.Build.0 = Debug|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.ActiveCfg = Debug|x64
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.Build.0 = Debug|x64
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.ActiveCfg = Debug|x64
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.Build.0 = Debug|x64
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.ActiveCfg = Release|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.Build.0 = Release|Any CPU
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.ActiveCfg = Release|x64
- {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.Build.0 = Release|x64
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x86.Build.0 = Debug|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.GPU|Any CPU.ActiveCfg = GPU|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.GPU|Any CPU.Build.0 = GPU|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.GPU|x64.ActiveCfg = GPU|x64
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.GPU|x64.Build.0 = GPU|x64
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.GPU|x86.ActiveCfg = GPU|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.GPU|x86.Build.0 = GPU|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.ActiveCfg = Release|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.Build.0 = Release|Any CPU
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.ActiveCfg = Release|x64
{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.Build.0 = Release|x64
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.ActiveCfg = Debug|x64
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.Build.0 = Debug|x64
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.ActiveCfg = Debug|x64
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.Build.0 = Debug|x64
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.ActiveCfg = Release|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.Build.0 = Release|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Release|x64
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Release|x64
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.Build.0 = Release|Any CPU
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.ActiveCfg = Release|x64
- {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.Build.0 = Release|x64
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x86.ActiveCfg = Release|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x86.Build.0 = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.Build.0 = Debug|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.ActiveCfg = Debug|x64
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.Build.0 = Debug|x64
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.ActiveCfg = Debug|x64
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.Build.0 = Debug|x64
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.ActiveCfg = Release|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.Build.0 = Release|Any CPU
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Release|x64
- {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Release|x64
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x86.Build.0 = Debug|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.GPU|Any CPU.ActiveCfg = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.GPU|Any CPU.Build.0 = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.GPU|x64.ActiveCfg = Release|x64
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.GPU|x64.Build.0 = Release|x64
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.GPU|x86.ActiveCfg = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.GPU|x86.Build.0 = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.ActiveCfg = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.Build.0 = Release|Any CPU
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.ActiveCfg = Release|x64
{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.Build.0 = Release|x64
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.ActiveCfg = Debug|x64
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug|x64.Build.0 = Debug|x64
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.ActiveCfg = Debug|x64
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Debug-Minimal|x64.Build.0 = Debug|x64
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.ActiveCfg = Release|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|Any CPU.Build.0 = Release|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.ActiveCfg = Release|x64
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Publish|x64.Build.0 = Release|x64
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|Any CPU.Build.0 = Release|Any CPU
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.ActiveCfg = Release|x64
- {6268B461-486A-460B-9B3C-86493CBBAAF7}.Release|x64.Build.0 = Release|x64
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.ActiveCfg = Debug|x64
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug|x64.Build.0 = Debug|x64
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.ActiveCfg = Debug|x64
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Debug-Minimal|x64.Build.0 = Debug|x64
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.ActiveCfg = Release|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|Any CPU.Build.0 = Release|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.ActiveCfg = Release|x64
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Publish|x64.Build.0 = Release|x64
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|Any CPU.Build.0 = Release|Any CPU
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.ActiveCfg = Release|x64
- {EB92DD90-6346-41FB-B967-2B33A860AD98}.Release|x64.Build.0 = Release|x64
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x86.ActiveCfg = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x86.Build.0 = Release|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|x64.ActiveCfg = Debug|x64
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|x64.Build.0 = Debug|x64
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|x86.Build.0 = Debug|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.GPU|Any CPU.ActiveCfg = GPU|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.GPU|Any CPU.Build.0 = GPU|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.GPU|x64.ActiveCfg = GPU|x64
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.GPU|x64.Build.0 = GPU|x64
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.GPU|x86.ActiveCfg = GPU|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.GPU|x86.Build.0 = GPU|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|Any CPU.Build.0 = Release|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|x64.ActiveCfg = Release|x64
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|x64.Build.0 = Release|x64
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|x86.ActiveCfg = Release|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|x86.Build.0 = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|x64.ActiveCfg = Debug|x64
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|x64.Build.0 = Debug|x64
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|x86.Build.0 = Debug|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.GPU|Any CPU.ActiveCfg = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.GPU|Any CPU.Build.0 = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.GPU|x64.ActiveCfg = Release|x64
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.GPU|x64.Build.0 = Release|x64
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.GPU|x86.ActiveCfg = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.GPU|x86.Build.0 = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|Any CPU.Build.0 = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|x64.ActiveCfg = Release|x64
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|x64.Build.0 = Release|x64
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|x86.ActiveCfg = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|x86.Build.0 = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|x64.ActiveCfg = Debug|x64
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|x64.Build.0 = Debug|x64
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|x86.Build.0 = Debug|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.GPU|Any CPU.ActiveCfg = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.GPU|Any CPU.Build.0 = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.GPU|x64.ActiveCfg = Release|x64
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.GPU|x64.Build.0 = Release|x64
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.GPU|x86.ActiveCfg = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.GPU|x86.Build.0 = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|Any CPU.Build.0 = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|x64.ActiveCfg = Release|x64
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|x64.Build.0 = Release|x64
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|x86.ActiveCfg = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|x86.Build.0 = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|x64.ActiveCfg = Debug|x64
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|x64.Build.0 = Debug|x64
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|x86.Build.0 = Debug|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.GPU|Any CPU.ActiveCfg = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.GPU|Any CPU.Build.0 = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.GPU|x64.ActiveCfg = Release|x64
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.GPU|x64.Build.0 = Release|x64
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.GPU|x86.ActiveCfg = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.GPU|x86.Build.0 = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|Any CPU.Build.0 = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|x64.ActiveCfg = Release|x64
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|x64.Build.0 = Release|x64
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|x86.ActiveCfg = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|x86.Build.0 = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|x64.ActiveCfg = Debug|x64
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|x64.Build.0 = Debug|x64
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|x86.Build.0 = Debug|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.GPU|Any CPU.ActiveCfg = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.GPU|Any CPU.Build.0 = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.GPU|x64.ActiveCfg = Release|x64
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.GPU|x64.Build.0 = Release|x64
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.GPU|x86.ActiveCfg = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.GPU|x86.Build.0 = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|Any CPU.Build.0 = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|x64.ActiveCfg = Release|x64
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|x64.Build.0 = Release|x64
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|x86.ActiveCfg = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|x86.Build.0 = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Debug|x64.ActiveCfg = Debug|x64
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Debug|x64.Build.0 = Debug|x64
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Debug|x86.Build.0 = Debug|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.GPU|Any CPU.ActiveCfg = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.GPU|Any CPU.Build.0 = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.GPU|x64.ActiveCfg = Release|x64
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.GPU|x64.Build.0 = Release|x64
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.GPU|x86.ActiveCfg = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.GPU|x86.Build.0 = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Release|Any CPU.Build.0 = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Release|x64.ActiveCfg = Release|x64
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Release|x64.Build.0 = Release|x64
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Release|x86.ActiveCfg = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Release|x86.Build.0 = Release|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Debug|x64.Build.0 = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Debug|x86.Build.0 = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.GPU|x64.Build.0 = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.GPU|x86.Build.0 = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Release|Any CPU.Build.0 = Release|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Release|x64.ActiveCfg = Release|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Release|x64.Build.0 = Release|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Release|x86.ActiveCfg = Release|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Release|x86.Build.0 = Release|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Debug|x64.Build.0 = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Debug|x86.Build.0 = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.GPU|x64.Build.0 = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.GPU|x86.Build.0 = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|Any CPU.Build.0 = Release|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|x64.ActiveCfg = Release|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|x64.Build.0 = Release|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|x86.ActiveCfg = Release|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|x86.Build.0 = Release|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|x64.Build.0 = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|x86.Build.0 = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|x64.Build.0 = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|x86.Build.0 = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|Any CPU.Build.0 = Release|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|x64.ActiveCfg = Release|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|x64.Build.0 = Release|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|x86.ActiveCfg = Release|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|x86.Build.0 = Release|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|x64.Build.0 = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|x86.Build.0 = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|x64.Build.0 = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|x86.Build.0 = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|Any CPU.Build.0 = Release|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|x64.ActiveCfg = Release|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|x64.Build.0 = Release|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|x86.ActiveCfg = Release|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|x86.Build.0 = Release|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|x64.Build.0 = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|x86.Build.0 = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|x64.Build.0 = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|x86.Build.0 = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|Any CPU.Build.0 = Release|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|x64.ActiveCfg = Release|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|x64.Build.0 = Release|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|x86.ActiveCfg = Release|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|x86.Build.0 = Release|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|x64.ActiveCfg = Debug|x64
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|x64.Build.0 = Debug|x64
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|x86.Build.0 = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|x64.ActiveCfg = Debug|x64
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|x64.Build.0 = Debug|x64
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|x86.Build.0 = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|Any CPU.Build.0 = Release|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|x64.ActiveCfg = Release|x64
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|x64.Build.0 = Release|x64
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|x86.ActiveCfg = Release|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|x86.Build.0 = Release|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|x64.ActiveCfg = Debug|x64
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|x64.Build.0 = Debug|x64
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|x86.Build.0 = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|x64.ActiveCfg = Debug|x64
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|x64.Build.0 = Debug|x64
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|x86.Build.0 = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|Any CPU.Build.0 = Release|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x64.ActiveCfg = Release|x64
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x64.Build.0 = Release|x64
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x86.ActiveCfg = Release|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x86.Build.0 = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x64.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x86.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x64.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x86.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|Any CPU.Build.0 = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x64.ActiveCfg = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x64.Build.0 = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x86.ActiveCfg = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x86.Build.0 = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x64.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x86.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x64.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x86.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|Any CPU.Build.0 = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x64.ActiveCfg = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x64.Build.0 = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x86.ActiveCfg = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x86.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
+ GlobalSection(NestedProjects) = preSolution
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144} = {01A1787F-A9BE-4221-84E8-6360DD010AB6}
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF} = {01A1787F-A9BE-4221-84E8-6360DD010AB6}
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370} = {01A1787F-A9BE-4221-84E8-6360DD010AB6}
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B} = {01A1787F-A9BE-4221-84E8-6360DD010AB6}
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18} = {01A1787F-A9BE-4221-84E8-6360DD010AB6}
+ {7DEA8760-E401-4872-81F3-405F185A13A0} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C} = {E1A5D2B7-10AF-4876-85C0-7714EF274214}
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C} = {E1A5D2B7-10AF-4876-85C0-7714EF274214}
+ {D24FCAA5-548C-4251-B226-A1B6535D0845} = {E1A5D2B7-10AF-4876-85C0-7714EF274214}
+ {C23563DB-FE21-48E7-A411-87A109E4A899} = {E1A5D2B7-10AF-4876-85C0-7714EF274214}
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0} = {E1A5D2B7-10AF-4876-85C0-7714EF274214}
+ {654A027D-1364-4729-880B-144DFE1FF5BB} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A}
EndGlobalSection
diff --git a/data/img001.bmp b/data/img001.bmp
new file mode 100644
index 000000000..d149d76f1
Binary files /dev/null and b/data/img001.bmp differ
diff --git a/docs/Example-fsharp.md b/docs/Example-fsharp.md
new file mode 100644
index 000000000..578543454
--- /dev/null
+++ b/docs/Example-fsharp.md
@@ -0,0 +1,55 @@
+Linear Regression in `Eager` mode:
+
+```fsharp
+#r "nuget: TensorFlow.Net"
+#r "nuget: TensorFlow.Keras"
+#r "nuget: SciSharp.TensorFlow.Redist"
+
+open Tensorflow
+open Tensorflow.NumPy
+open type Tensorflow.Binding
+open type Tensorflow.KerasApi
+
+let tf = New()
+tf.enable_eager_execution()
+
+// Parameters
+let training_steps = 1000
+let learning_rate = 0.01f
+let display_step = 100
+
+// Sample data
+let train_X =
+ np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
+ 7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f)
+let train_Y =
+ np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
+ 2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f)
+let n_samples = train_X.shape.[0]
+
+// We can set a fixed init value in order to demo
+let W = tf.Variable(-0.06f,name = "weight")
+let b = tf.Variable(-0.73f, name = "bias")
+let optimizer = keras.optimizers.SGD(learning_rate)
+
+// Run training for the given number of steps.
+for step = 1 to training_steps do
+ // Run the optimization to update W and b values.
+ // Wrap computation inside a GradientTape for automatic differentiation.
+ use g = tf.GradientTape()
+ // Linear regression (Wx + b).
+ let pred = W * train_X + b
+ // Mean square error.
+ let loss = tf.reduce_sum(tf.pow(pred - train_Y,2)) / (2 * n_samples)
+ // should stop recording
+ // compute gradients
+ let gradients = g.gradient(loss,struct (W,b))
+
+ // Update W and b following gradients.
+ optimizer.apply_gradients(zip(gradients, struct (W,b)))
+
+ if (step % display_step) = 0 then
+ let pred = W * train_X + b
+ let loss = tf.reduce_sum(tf.pow(pred-train_Y,2)) / (2 * n_samples)
+ printfn $"step: {step}, loss: {loss.numpy()}, W: {W.numpy()}, b: {b.numpy()}"
+```
\ No newline at end of file
diff --git a/docs/README-CN.md b/docs/README-CN.md
new file mode 100644
index 000000000..9776b0fb8
--- /dev/null
+++ b/docs/README-CN.md
@@ -0,0 +1,228 @@
+
+
+**Tensorflow.NET**是AI框架[TensorFlow](https://www.tensorflow.org/)在.NET平台上的实现,支持C#和F#,可以用来搭建深度学习模型并进行训练和推理,并内置了Numpy API,可以用来进行其它科学计算。
+
+Tensorflow.NET并非对于Python的简单封装,而是基于C API的pure C#实现,因此使用时无需额外的环境,可以很方便地用NuGet直接安装使用。并且dotnet团队提供的[ML.NET](https://github.com/dotnet/machinelearning)也依赖于Tensorflow.NET,支持调用Tensorflow.NET进行训练和推理,可以很方便地融入.NET生态。
+
+与tensorflow相同,Tensorflow.NET也内置了Keras这一高级API,只要在安装Tensorflow.NET的同时安装Tensorflow.Keras就可以使用,Keras支持以模块化的方式调用模型,给模型的搭建提供了极大的便利。
+
+[](https://gitter.im/sci-sharp/community)
+[](https://ci.appveyor.com/project/Haiping-Chen/tensorflow-net)
+[](https://www.nuget.org/packages/TensorFlow.NET)
+[](https://tensorflownet.readthedocs.io/en/latest/?badge=latest)
+[](https://996.icu/#/en_US)
+[](https://mybinder.org/v2/gh/javiercp/BinderTF.NET/master?urlpath=lab)
+
+中文 | [English](https://github.com/SciSharp/TensorFlow.NET#readme)
+
+*当前主分支与Tensorflow2.10版本相对应,支持Eager Mode,同时也支持v1的静态图。*
+
+
+
+
+## Why Tensorflow.NET?
+
+`SciSharp STACK`开源社区的目标是构建.NET平台下易用的科学计算库,而Tensorflow.NET就是其中最具代表性的仓库之一。在深度学习领域Python是主流,无论是初学者还是资深开发者,模型的搭建和训练都常常使用Python写就的AI框架,比如tensorflow。但在实际应用深度学习模型的时候,又可能希望用到.NET生态,亦或只是因为.NET是自己最熟悉的领域,这时候Tensorflow.NET就有显著的优点,因为它不仅可以和.NET生态很好地贴合,其API还使得开发者很容易将Python代码迁移过来。下面的对比就是很好的例子,Python代码和C#代码有着高度相似的API,这会使得迁移的时候无需做过多修改。
+
+
+
+除了高度相似的API外,Tensorflow.NET与tensorflow也已经打通数据通道,tensorflow训练并保存的模型可以在Tensorflow.NET中直接读取并继续训练或推理,反之Tensorflow.NET保存的模型也可以在tensorflow中读取,这大大方便了模型的训练和部署。
+
+与其它类似的库比如[TensorFlowSharp](https://www.nuget.org/packages/TensorFlowSharp/)相比,Tensorflow.NET的实现更加完全,提供了更多的高级API,使用起来更为方便,更新也更加迅速。
+
+
+## 文档
+
+基本介绍与简单用例:[Tensorflow.NET Documents](https://scisharp.github.io/tensorflow-net-docs)
+
+详细文档:[The Definitive Guide to Tensorflow.NET](https://tensorflownet.readthedocs.io/en/latest/FrontCover.html)
+
+例程:[TensorFlow.NET Examples](https://github.com/SciSharp/TensorFlow.NET-Examples)
+
+运行例程常见问题:[Tensorflow.NET FAQ](tensorflowlib/README.md)
+
+## 安装与使用
+
+安装可以在NuGet包管理器中搜索包名安装,也可以用下面命令行的方式。
+
+安装分为两个部分,第一部分是Tensorflow.NET的主体:
+
+```sh
+### 安装Tensorflow.NET
+PM> Install-Package TensorFlow.NET
+
+### 安装Tensorflow.Keras
+PM> Install-Package TensorFlow.Keras
+```
+
+第二部分是计算支持部分,只需要根据自己的设备和系统选择下面之一即可:
+
+```
+### CPU版本,支持Windows、Linux和Mac
+PM> Install-Package SciSharp.TensorFlow.Redist
+
+### Windows下的GPU版本(需要安装CUDA和cuDNN)
+PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU
+
+### Linux下的GPU版本(需要安装CUDA和cuDNN)
+PM> Install-Package SciSharp.TensorFlow.Redist-Linux-GPU
+```
+
+下面给出两个简单的例子,更多例子可以在[TensorFlow.NET Examples]中查看。
+
+### 简单例子(使用Eager Mode进行线性回归)
+
+```csharp
+using static Tensorflow.Binding;
+using static Tensorflow.KerasApi;
+using Tensorflow;
+using Tensorflow.NumPy;
+
+// Parameters
+var training_steps = 1000;
+var learning_rate = 0.01f;
+var display_step = 100;
+
+// Sample data
+var X = np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
+ 7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f);
+var Y = np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
+ 2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f);
+var n_samples = X.shape[0];
+
+// We can set a fixed init value in order to demo
+var W = tf.Variable(-0.06f, name: "weight");
+var b = tf.Variable(-0.73f, name: "bias");
+var optimizer = keras.optimizers.SGD(learning_rate);
+
+// Run training for the given number of steps.
+foreach (var step in range(1, training_steps + 1))
+{
+ // Run the optimization to update W and b values.
+ // Wrap computation inside a GradientTape for automatic differentiation.
+ using var g = tf.GradientTape();
+ // Linear regression (Wx + b).
+ var pred = W * X + b;
+ // Mean square error.
+ var loss = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples);
+ // should stop recording
+ // Compute gradients.
+ var gradients = g.gradient(loss, (W, b));
+
+ // Update W and b following gradients.
+ optimizer.apply_gradients(zip(gradients, (W, b)));
+
+ if (step % display_step == 0)
+ {
+ pred = W * X + b;
+ loss = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples);
+ print($"step: {step}, loss: {loss.numpy()}, W: {W.numpy()}, b: {b.numpy()}");
+ }
+}
+```
+
+这一用例也可以在[Jupyter Notebook Example](https://github.com/SciSharp/SciSharpCube)进行运行.
+
+### 简单例子(使用Keras搭建Resnet)
+
+```csharp
+using static Tensorflow.Binding;
+using static Tensorflow.KerasApi;
+using Tensorflow;
+using Tensorflow.NumPy;
+
+var layers = keras.layers;
+// input layer
+var inputs = keras.Input(shape: (32, 32, 3), name: "img");
+// convolutional layer
+var x = layers.Conv2D(32, 3, activation: "relu").Apply(inputs);
+x = layers.Conv2D(64, 3, activation: "relu").Apply(x);
+var block_1_output = layers.MaxPooling2D(3).Apply(x);
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(block_1_output);
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(x);
+var block_2_output = layers.Add().Apply(new Tensors(x, block_1_output));
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(block_2_output);
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(x);
+var block_3_output = layers.Add().Apply(new Tensors(x, block_2_output));
+x = layers.Conv2D(64, 3, activation: "relu").Apply(block_3_output);
+x = layers.GlobalAveragePooling2D().Apply(x);
+x = layers.Dense(256, activation: "relu").Apply(x);
+x = layers.Dropout(0.5f).Apply(x);
+// output layer
+var outputs = layers.Dense(10).Apply(x);
+// build keras model
+var model = keras.Model(inputs, outputs, name: "toy_resnet");
+model.summary();
+// compile keras model in tensorflow static graph
+model.compile(optimizer: keras.optimizers.RMSprop(1e-3f),
+ loss: keras.losses.SparseCategoricalCrossentropy(from_logits: true),
+ metrics: new[] { "acc" });
+// prepare dataset
+var ((x_train, y_train), (x_test, y_test)) = keras.datasets.cifar10.load_data();
+// normalize the input
+x_train = x_train / 255.0f;
+// training
+model.fit(x_train[new Slice(0, 2000)], y_train[new Slice(0, 2000)],
+ batch_size: 64,
+ epochs: 10,
+ validation_split: 0.2f);
+// save the model
+model.save("./toy_resnet_model");
+```
+
+此外,Tensorflow.NET也支持用F#搭建上述模型进行训练和推理。
+
+## Tensorflow.NET版本对应关系
+
+| TensorFlow.NET Versions | tensorflow 1.14, cuda 10.0 | tensorflow 1.15, cuda 10.0 | tensorflow 2.3, cuda 10.1 | tensorflow 2.4, cuda 11 | tensorflow 2.7, cuda 11 |tensorflow 2.10, cuda 11 |
+| -------------------------- | ------------- | -------------- | ------------- | ------------- | ------------ | ------------ |
+| tf.net 0.10x, tf.keras 0.10 | | | | | | x |
+| tf.net 0.7x, tf.keras 0.7 | | | | | x | |
+| tf.net 0.4x, tf.keras 0.5 | | | | x | | |
+| tf.net 0.3x, tf.keras 0.4 | | | x | | | |
+| tf.net 0.2x | | x | x | | | |
+| tf.net 0.15 | x | x | | | | |
+| tf.net 0.14 | x | | | | | |
+
+
+```
+tf.net 0.4x -> tf native 2.4
+tf.net 0.6x -> tf native 2.6
+tf.net 0.7x -> tf native 2.7
+tf.net 0.10x -> tf native 2.10
+...
+```
+
+如果使用过程中发现有缺失的版本,请告知我们,谢谢!
+
+请注意Tensorflow.NET与Tensorflow.Keras版本存在一一对应关系,请安装与Tensorflow.NET对应的Tensorflow.Keras版本。
+
+## 参与我们的开发:
+
+我们欢迎任何人的任何形式的贡献!无论是文档中的错误纠正,新特性提议,还是BUG修复等等,都会使得Tensorflow.NET项目越来越好,Tensorflow.NET的全体开发者也会积极帮助解决您提出的问题。
+
+下面任何一种形式都可以帮助Tensorflow.NET越来越好:
+
+* Star和分享Tensorflow.NET项目
+* 为Tensorflow.NET添加更多的用例
+* 在issue中告知我们Tensorflow.NET目前相比tensorflow缺少的API或者没有对齐的特性
+* 在issue中提出Tensorflow.NET存在的BUG或者可以改进的地方
+* 在待办事项清单中选择一个进行或者解决某个issue
+* 帮助我们完善文档,这也十分重要
+
+
+## 支持我们
+我们推出了[TensorFlow.NET实战](https://item.jd.com/13441549.html)这本书,包含了Tensorflow.NET主要开发者编写的讲解与实战例程,欢迎您的购买,希望这本书可以给您带来帮助。
+
+
+
+
+
+
+## 联系我们
+
+可以在 [Twitter](https://twitter.com/ScisharpStack), [Facebook](https://www.facebook.com/scisharp.stack.9), [Medium](https://medium.com/scisharp), [LinkedIn](https://www.linkedin.com/company/scisharp-stack/)中关注我们,也可以在[Gitter](https://gitter.im/sci-sharp/community)中与项目开发者以及其它使用者进行沟通交流,也欢迎在仓库中提起issue。
+
+TensorFlow.NET is a part of [SciSharp STACK](https://scisharp.github.io/SciSharp/)
+
+
diff --git a/docs/RELEASE.md b/docs/RELEASE.md
new file mode 100644
index 000000000..62a1be238
--- /dev/null
+++ b/docs/RELEASE.md
@@ -0,0 +1,44 @@
+# Release Notes
+
+**Thanks to our Contributors!**
+
+This release contains contributions from many people at SciSharp as well as external contributors.
+
+**Release Date 02/06/2021**
+
+### TensorFlow.Binding v0.33.0
+
+* Improve memory usage
+* Fix minor bugs
+
+### TensorFlow.Keras v0.4.0
+
+* Add Subtract layer
+
+* Add model.load_weights and model.save_weights
+
+* Fix memory leak issue
+
+* Support to build YOLOv3 object detection model
+
+
+
+**Release Date 01/09/2021**
+
+### TensorFlow.Binding v0.32.0
+
+* Fix input `dtype` for `MapDataset`.
+* Fix `image_dataset_from_directory` function.
+* Fix `tf.transpose`.
+* Add `array_ops.where_v2`, `array_ops.select_v2`, `array_ops.softplus`.
+* Add `dataset.dataset_cardinality`.
+
+### TensorFlow.Keras v0.3.0
+
+* Fix `weight` init value for `double` type in `compute_weighted_loss`.
+* Add `MeanSquaredError `, `MeanAbsolutePercentageError `, `MeanAbsoluteError` and `MeanSquaredLogarithmicError` loss functions.
+* `Sequential` model API works.
+* Add `ShellProgressBar` to show training progress better.
+
+
+
diff --git a/docs/TIM.jpg b/docs/TIM.jpg
deleted file mode 100644
index a436aa301..000000000
Binary files a/docs/TIM.jpg and /dev/null differ
diff --git a/docs/assets/WeChatCollection.jpg b/docs/assets/WeChatCollection.jpg
new file mode 100644
index 000000000..587b54991
Binary files /dev/null and b/docs/assets/WeChatCollection.jpg differ
diff --git a/docs/assets/performance-comparison.jpg b/docs/assets/performance-comparison.jpg
new file mode 100644
index 000000000..382f7ab61
Binary files /dev/null and b/docs/assets/performance-comparison.jpg differ
diff --git a/docs/source/Constant.md b/docs/source/Constant.md
index 4d782f119..dd6aa3bf0 100644
--- a/docs/source/Constant.md
+++ b/docs/source/Constant.md
@@ -1,6 +1,6 @@
-# Chapter. Constant
+# Chapter 2. Constant
-In TensorFlow, a constant is a special Tensor that cannot be modified while the graph is running. Like in a linear model $\tilde{y_i}=\boldsymbol{w}x_i+b$, constant $b$ can be represented as a Constant Tensor. Since the constant is a Tensor, it also has all the data characteristics of Tensor, including:
+In TensorFlow, a constant is a special Tensor that cannot be modified while the graph is running. Like in a linear model `y = ax + b`, constant `b` can be represented as a `Constant` Tensor. Since the constant is a Tensor, it also has all the data characteristics of Tensor, including:
* value: scalar value or constant list matching the data type defined in TensorFlow;
* dtype: data type;
@@ -9,9 +9,9 @@ In TensorFlow, a constant is a special Tensor that cannot be modified while the
-##### How to create a Constant
+### How to create a Constant
-TensorFlow provides a handy function to create a Constant. In TF.NET, you can use the same function name `tf.constant` to create it. TF.NET takes the same name as python binding to the API. Naming, although this will make developers who are used to C# naming habits feel uncomfortable, but after careful consideration, I decided to give up the C# convention naming method.
+TensorFlow provides a handy function to create a Constant. In TF.NET, you can use the same function name `tf.constant` to create it. TF.NET uses the same API names as the Python binding. Although this will make developers who are used to C# naming conventions feel uncomfortable, after careful consideration I decided to give up the C# convention naming method. One reason is that model developers don't have to learn a totally different set of APIs.
Initialize a scalar constant:
@@ -24,21 +24,57 @@ var c4 = tf.constant("Big Tree"); // string
Initialize a constant through ndarray:
+TF.NET works very well with `NumSharp`'s `NDArray`. You can create a tensor from .NET primitive data type and NDArray as well. An `ndarray` is a (usually fixed-size) multidimensional container of items of the same type and size. The number of dimensions and items in an array is defined by its `shape`, which is a tuple of N non-negative integers that specify the sizes of each dimension.
+
```csharp
// dtype=int, shape=(2, 3)
-var nd = np.array(new int[][]
+var nd = np.array(new int[,]
{
- new int[]{3, 1, 1},
- new int[]{2, 3, 1}
+ {1, 2, 3},
+ {4, 5, 6}
});
var tensor = tf.constant(nd);
```
-##### Dive in Constant
+### Dive in Constant
+
+Now let's explore how `constant` works in `eager` mode inside the black box.
+
+Let's continue using the last examples, we're going to initialize a tensor in an ndarray of `[shape(2, 3), int32]`.
+
+##### NDArray
+
+The first thing we need to know is about `ndarray`'s memory model. The ndarray memory model is a very important data structure, and almost all underlying computations are inseparable from this data structure. One fundamental aspect of the ndarray is that an array is seen as a "chunk" of memory starting at some location. The interpretation of this memory depends on the stride information. A segment of memory is inherently 1-dimensional, and there are many different schemes for arranging the items of an N-dimensional array in a 1-dimensional block. `ndarray` objects can accommodate any strided indexing scheme. In a strided scheme, the N-dimensional index corresponds to an offset (in bytes) computed from the strides:
+
+
+
+If we take a look at the real memory allocation in Visual Studio, the diagram below helps us understand the data structure more intuitively. The strides keep track of the size of every single dimension and help identify the actual offset in heap memory. The formula to calculate the offset is: `offset = i * strides[0] + j * strides[1]`.
+
+For example: if you want to seek the value at `[1, 1]`, you just need to calculate `1 * 3 + 1 * 1 = 4`; converted to a pointer this is `0x000002556B194260 + 4 = 0x000002556B194264`, which holds the value `05`.
+
+
-Now let's explore how `constant` works.
+Through the above diagram, we know how the data is stored in memory, and then we will look at how the data is transferred to `TensorFlow`.
+##### Tensor
+
+If you don't understand very well what a `Tensor` is, you can go back to the chapter `Tensor`, which contains a thorough explanation in case you skipped it. A Tensor is essentially an NDArray that can have more than 2 dimensions.
+
+TensorFlow will decide whether to copy the data or use the same pointer. Generally speaking, it is safer to copy the data for subsequent processing, especially when interoperating between the .NET runtime and the C++ runtime: each has its own garbage collection (GC) mechanism, and the application will crash if either one accesses a block of destroyed memory. `TF_STRING` and `TF_RESOURCE` tensors have a different representation in `TF_Tensor` than they do in `tensorflow::Tensor`. Other types have the same representation, so copy only if it is safe to do so.
+
+
+
+Before TensorFlow creates the `TF_Tensor`, it checks the shape and data size. If the size doesn't match, it will return a `nullptr` pointer.
+
+##### Get the data of Tensor
+
+For `eager` mode, it's pretty simple to view the actual value in a `tensor`.
+
+```csharp
+var data = tensor.numpy();
+```
+The `data` will be a `ndarray` variable.
##### Other functions to create a Constant
diff --git a/docs/source/EagerMode.md b/docs/source/EagerMode.md
index cbb0ea026..ded56d41f 100644
--- a/docs/source/EagerMode.md
+++ b/docs/source/EagerMode.md
@@ -1,2 +1,3 @@
-# Chapter. Eager Mode
+# Chapter 4. Eager Mode
+TensorFlow's eager execution is an imperative programming environment that evaluates operations immediately, without building graphs: operations return concrete values instead of constructing a computational graph to run later. This makes it easy to get started with TensorFlow and debug models, and it reduces boilerplate as well.
\ No newline at end of file
diff --git a/docs/source/Graph.md b/docs/source/Graph.md
index 7bc473f25..874cd9a42 100644
--- a/docs/source/Graph.md
+++ b/docs/source/Graph.md
@@ -1,4 +1,4 @@
-# Chapter. Graph
+# Chapter 3. Graph
TensorFlow uses a **dataflow graph** to represent your computation in terms of the dependencies between individual operations. A graph defines the computation. It doesn't compute anything, it doesn't hold any values, it just defines the operations that you specified in your code.
diff --git a/docs/source/HelloWorld.md b/docs/source/HelloWorld.md
index 8023d9f9c..8b7fbf733 100644
--- a/docs/source/HelloWorld.md
+++ b/docs/source/HelloWorld.md
@@ -10,7 +10,7 @@ Let's run a classic HelloWorld program first and see if TensorFlow is running on
### Install the TensorFlow.NET SDK
-TensorFlow.NET uses the .NET Standard 2.0 standard, so your new project Target Framework can be .NET Framework or .NET Core. All the examples in this book are using .NET Core 2.2 and Microsoft Visual Studio Community 2017. To start building TensorFlow program you just need to download and install the .NET SDK (Software Development Kit). You have to download the latest .NET Core SDK from offical website: https://dotnet.microsoft.com/download.
+TensorFlow.NET uses the .NET Standard 2.0 standard, so your new project Target Framework can be .NET Framework or .NET Core/ .NET 5. All the examples in this book are using .NET Core 3.1 and Microsoft Visual Studio Community 2019. To start building TensorFlow program you just need to download and install the .NET SDK (Software Development Kit). You have to download the latest .NET Core SDK from the official website: https://dotnet.microsoft.com/download.
@@ -25,51 +25,52 @@ TensorFlow.NET uses the .NET Standard 2.0 standard, so your new project Target F
```cmd
+### install tensorflow C# binding
PM> Install-Package TensorFlow.NET
+
+### Install tensorflow binary
+### For CPU version
+PM> Install-Package SciSharp.TensorFlow.Redist
+
+### For GPU version (CUDA and cuDNN are required)
+PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU
```
### Start coding Hello World
-After installing the TensorFlow.NET package, you can use the `using Tensorflow` to introduce the TensorFlow library.
-
+After installing the TensorFlow.NET package, you can use the `using static Tensorflow.Binding` to introduce the TensorFlow .NET library.
+TensorFlow 2.x enabled `Eager Mode` by default. About what eager mode is, I will introduce it in detail in the following chapters.
```csharp
using System;
-using Tensorflow;
+using static Tensorflow.Binding;
namespace TensorFlowNET.Examples
{
///
/// Simple hello world using TensorFlow
///
- public class HelloWorld : IExample
+ class Program
{
- public void Run()
+ static void Main(string[] args)
{
- /* Create a Constant op
- The op is added as a node to the default graph.
-
- The value returned by the constructor represents the output
- of the Constant op. */
var hello = tf.constant("Hello, TensorFlow!");
-
- // Start tf session
- using (var sess = tf.Session())
- {
- // Run the op
- var result = sess.run(hello);
- Console.WriteLine(result);
- }
+ Console.WriteLine(hello);
}
}
}
```
After CTRL + F5 run, you will get the output.
```cmd
-2019-01-05 10:53:42.145931: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
-Hello, TensorFlow!
-Press any key to continue . . .
+9/20/2020 2:15:09 AM Starting Hello World
+tf.Tensor: shape=(), dtype=string, numpy=Hello, TensorFlow.NET!
+9/20/2020 2:15:09 AM Completed Hello World
+Example: Hello World in 0.1273463s is OK!
+TensorFlow.NET v0.20.1.0
+TensorFlow Binary v2.3.0
+1 of 21 example(s) are completed.
+Press [Enter] to continue...
```
This sample code can be found at [here](https://github.com/SciSharp/SciSharp-Stack-Examples/blob/master/src/TensorFlowNET.Examples/HelloWorld.cs).
diff --git a/docs/source/LinearRegression.md b/docs/source/LinearRegression.md
index 81a6dbc4c..8033625c3 100644
--- a/docs/source/LinearRegression.md
+++ b/docs/source/LinearRegression.md
@@ -82,4 +82,4 @@ When we visualize the graph in TensorBoard:

-The full example is [here](https://github.com/SciSharp/TensorFlow.NET/blob/master/test/TensorFlowNET.Examples/BasicModels/LinearRegression.cs).
+The full example is [here](https://github.com/SciSharp/TensorFlow.NET-Examples/blob/master/src/TensorFlowNET.Examples/BasicModels/LinearRegression.cs).
diff --git a/docs/source/LogisticRegression.md b/docs/source/LogisticRegression.md
index 42cda8983..ddf75f846 100644
--- a/docs/source/LogisticRegression.md
+++ b/docs/source/LogisticRegression.md
@@ -13,4 +13,4 @@ The dependent variable of logistics regression can be two-category or multi-cate
Softmax regression allows us to handle  where K is the number of classes.
-The full example is [here](https://github.com/SciSharp/TensorFlow.NET/blob/master/test/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs).
+The full example is [here](https://github.com/SciSharp/TensorFlow.NET-Examples/blob/master/src/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs).
diff --git a/docs/source/NearestNeighbor.md b/docs/source/NearestNeighbor.md
index 861181aae..94e300df6 100644
--- a/docs/source/NearestNeighbor.md
+++ b/docs/source/NearestNeighbor.md
@@ -2,4 +2,4 @@
The nearest neighbour algorithm was one of the first algorithms used to solve the travelling salesman problem. In it, the salesman starts at a random city and repeatedly visits the nearest city until all have been visited. It quickly yields a short tour, but usually not the optimal one.
-The full example is [here](https://github.com/SciSharp/TensorFlow.NET/blob/master/test/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs).
\ No newline at end of file
+The full example is [here](https://github.com/SciSharp/TensorFlow.NET-Examples/blob/master/src/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs).
\ No newline at end of file
diff --git a/docs/source/Placeholder.md b/docs/source/Placeholder.md
index a578a1272..2cf345bd0 100644
--- a/docs/source/Placeholder.md
+++ b/docs/source/Placeholder.md
@@ -8,13 +8,13 @@ In this chapter we will talk about another common data type in TensorFlow: Place
var x = tf.placeholder(tf.int32);
var y = x * 3;
-Python.with(tf.Session(), sess =>
+using (var sess = tf.Session())
{
var result = sess.run(y, feed_dict: new FeedItem[]
{
new FeedItem(x, 2)
});
// (int)result should be 6;
-});
+}
```
diff --git a/docs/source/Tensor.md b/docs/source/Tensor.md
index 50cc6a440..aefb884f7 100644
--- a/docs/source/Tensor.md
+++ b/docs/source/Tensor.md
@@ -1,4 +1,4 @@
-# Chapter. Tensor
+# Chapter 1. Tensor
### Represents one of the outputs of an Operation
@@ -6,13 +6,13 @@
##### What is Tensor?
-Tensor holds a multi-dimensional array of elements of a single data type which is very similar with numpy's ndarray. When the dimension is zero, it can be called a scalar. When the dimension is 2, it can be called a matrix. When the dimension is greater than 2, it is usually called a tensor. If you are very familiar with numpy, then understanding Tensor will be quite easy.
-
+Tensor holds a multi-dimensional array of elements of a single data type which is very similar to `NumPy`'s `ndarray`. When the dimension is zero, it can be called a scalar. When the dimension is 2, it can be called a matrix. When the dimension is greater than 2, it is usually called a tensor. If you are very familiar with `NumPy`, then understanding Tensor will be quite easy.
+
##### How to create a Tensor?
-There are many ways to initialize a Tensor object in TF.NET. It can be initialized from a scalar, string, matrix or tensor.
+There are many ways to initialize a Tensor object in TF.NET. It can be initialized from a scalar, string, matrix or tensor. But the best way to create a Tensor is by using high-level APIs like `tf.constant`, `tf.zeros` and `tf.ones`. We'll talk about constants in more detail in the next chapter.
```csharp
// Create a tensor holds a scalar value
@@ -32,13 +32,9 @@ Console.WriteLine($"t1: {t1}, t2: {t2}, t3: {t3}");
##### Data Structure of Tensor
-
-
-
-
TF uses column major order. If we use NumSharp to generate a 2 x 3 matrix, if we access the data from 0 to 5 in order, we won't get a number of 1-6, but we get the order of 1, 4, 2, 5, 3, 6. a set of numbers.
-```cs
+```csharp
// Generate a matrix:[[1, 2, 3], [4, 5, 6]]
var nd = np.array(1f, 2f, 3f, 4f, 5f, 6f).reshape(2, 3);
// The index will be 0 2 4 1 3 5, it's column-major order.
@@ -49,3 +45,8 @@ var nd = np.array(1f, 2f, 3f, 4f, 5f, 6f).reshape(2, 3);


+
+##### Index/ Slice of Tensor
+
+Tensor element can be accessed by `index` and `slice` related operations. Through some high level APIs, we can easily access specific dimension's data.
+
diff --git a/docs/source/_static/constant/n-index-formula-offset.svg b/docs/source/_static/constant/n-index-formula-offset.svg
new file mode 100644
index 000000000..6c5a3219c
--- /dev/null
+++ b/docs/source/_static/constant/n-index-formula-offset.svg
@@ -0,0 +1,41 @@
+
+
+
\ No newline at end of file
diff --git a/docs/source/_static/constant/n-index-formula.svg b/docs/source/_static/constant/n-index-formula.svg
new file mode 100644
index 000000000..5d05c06f0
--- /dev/null
+++ b/docs/source/_static/constant/n-index-formula.svg
@@ -0,0 +1,33 @@
+
+
+
\ No newline at end of file
diff --git a/docs/source/_static/contiguous-block-of-memory-ndarray-example-1.png b/docs/source/_static/contiguous-block-of-memory-ndarray-example-1.png
new file mode 100644
index 000000000..140e37716
Binary files /dev/null and b/docs/source/_static/contiguous-block-of-memory-ndarray-example-1.png differ
diff --git a/docs/source/_static/contiguous-block-of-memory.png b/docs/source/_static/contiguous-block-of-memory.png
new file mode 100644
index 000000000..44d3ab62f
Binary files /dev/null and b/docs/source/_static/contiguous-block-of-memory.png differ
diff --git a/docs/source/_static/tensor-constant-ndarray.png b/docs/source/_static/tensor-constant-ndarray.png
new file mode 100644
index 000000000..3610ee0cd
Binary files /dev/null and b/docs/source/_static/tensor-constant-ndarray.png differ
diff --git a/docs/source/_static/tensor-naming.png b/docs/source/_static/tensor-naming.png
new file mode 100644
index 000000000..7b1d408b9
Binary files /dev/null and b/docs/source/_static/tensor-naming.png differ
diff --git a/src/SciSharp.TensorFlow.Redist/README.md b/src/SciSharp.TensorFlow.Redist/README.md
index 6dfce3e1e..4002aa21d 100644
--- a/src/SciSharp.TensorFlow.Redist/README.md
+++ b/src/SciSharp.TensorFlow.Redist/README.md
@@ -22,11 +22,19 @@ https://www.nuget.org/packages/SciSharp.TensorFlow.Redist
Related merged [commits](https://github.com/SciSharp/TensorFlow.NET/commit/854a5ba61ad0e400623821236bd117cc24c6cb77).
+
+
+#### Download pre-build package
+
+[Mac OSX CPU](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-darwin-x86_64-2.10.0.tar.gz), [Linux CPU](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-linux-x86_64-2.10.0.tar.gz), [Linux GPU](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-2.10.0.tar.gz), [Windows CPU](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-2.10.0.zip), [Windows GPU](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-2.10.0.zip)
+
+
+
#### Pack and Deploy ####
On Windows, the tar command does not support extracting archives with symlinks. So when `dotnet pack` runs on Windows it will only package the Windows binaries.
1. Run `dotnet pack SciSharp.TensorFlow.Redist.nupkgproj` under `src/SciSharp.TensorFlow.Redist` directory in Linux.
-2. Run `dotnet nuget push SciSharp.TensorFlow.Redist.1.15.0.nupkg -k APIKEY -s https://api.nuget.org/v3/index.json`
+2. Run `dotnet nuget push SciSharp.TensorFlow.Redist.2.10.0.nupkg -k APIKEY -s https://api.nuget.org/v3/index.json -t 600`
diff --git a/src/TensorFlowNET.Core/APIs/c_api.cs b/src/TensorFlowNET.Core/APIs/c_api.cs
index bdf2785fe..a91b86827 100644
--- a/src/TensorFlowNET.Core/APIs/c_api.cs
+++ b/src/TensorFlowNET.Core/APIs/c_api.cs
@@ -1,5 +1,5 @@
/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+ Copyright 2020 Haiping Chen. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,6 +16,7 @@ limitations under the License.
using System;
using System.Runtime.InteropServices;
+using static Tensorflow.CppShapeInferenceResult.Types;
namespace Tensorflow
{
@@ -43,15 +44,48 @@ namespace Tensorflow
///
public partial class c_api
{
- public const string TensorFlowLibName = @"D:\SciSharp\tensorflow-google\bazel-bin\tensorflow\tensorflow.dll";
+ public const string TensorFlowLibName = "tensorflow";
public static string StringPiece(IntPtr handle)
{
return handle == IntPtr.Zero ? String.Empty : Marshal.PtrToStringAnsi(handle);
}
+ public unsafe static byte[] ByteStringPiece(Buffer? handle)
+ {
+ if (handle is null)
+ {
+ return new byte[0];
+ }
+ var data = handle.ToArray();
+ return data;
+ }
+
+ public unsafe static byte[] ByteStringPieceFromNativeString(IntPtr handle)
+ {
+ if (handle == IntPtr.Zero)
+ {
+ return new byte[0];
+ }
+
+ byte* str_data = (byte*)handle.ToPointer();
+ List bytes = new List();
+ byte current = 255;
+ while (current != ((byte)'\0'))
+ {
+ current = *(str_data++);
+ bytes.Add(current);
+ }
+ var data = bytes.ToArray();
+ return data;
+ }
+
+ [UnmanagedFunctionPointer(CallingConvention.Winapi)]
public delegate void Deallocator(IntPtr data, IntPtr size, ref DeallocatorArgs args);
+ [UnmanagedFunctionPointer(CallingConvention.Winapi)]
+ public delegate void DeallocatorV2(IntPtr data, long size, IntPtr args);
+
public struct DeallocatorArgs
{
internal static unsafe c_api.DeallocatorArgs* EmptyPtr;
@@ -59,8 +93,8 @@ public struct DeallocatorArgs
static unsafe DeallocatorArgs()
{
- Empty = new IntPtr(EmptyPtr = (DeallocatorArgs*) Marshal.AllocHGlobal(Marshal.SizeOf()));
- *EmptyPtr = new DeallocatorArgs() {gc_handle = IntPtr.Zero, deallocator_called = false};
+ Empty = new IntPtr(EmptyPtr = (DeallocatorArgs*)Marshal.AllocHGlobal(Marshal.SizeOf()));
+ *EmptyPtr = new DeallocatorArgs() { gc_handle = IntPtr.Zero, deallocator_called = false };
}
public bool deallocator_called;
@@ -68,6 +102,6 @@ static unsafe DeallocatorArgs()
}
[DllImport(TensorFlowLibName)]
- public static extern IntPtr TF_Version();
+ internal static extern IntPtr TF_Version();
}
}
diff --git a/src/TensorFlowNET.Core/APIs/c_api.customize.cs b/src/TensorFlowNET.Core/APIs/c_api.customize.cs
new file mode 100644
index 000000000..bee4897ee
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/c_api.customize.cs
@@ -0,0 +1,17 @@
+using System;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+using System.Text;
+
+namespace Tensorflow
+{
+ public partial class c_api
+ {
+ [DllImport(TensorFlowLibName)]
+ public static extern void TF_SetAttr(SafeGraphHandle graph, IntPtr op, string attr_name, SafeBufferHandle attr_value_proto, SafeStatusHandle status);
+ [DllImport(TensorFlowLibName)]
+ public static extern SafeBufferHandle TF_GetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output);
+ [DllImport(TensorFlowLibName)]
+ public static extern void TF_SetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output, byte[] data, long proto_len, SafeStatusHandle status);
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/c_api_lite.cs b/src/TensorFlowNET.Core/APIs/c_api_lite.cs
new file mode 100644
index 000000000..5a437d261
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/c_api_lite.cs
@@ -0,0 +1,91 @@
+using System;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+using System.Text;
+using Tensorflow.Lite;
+
+namespace Tensorflow
+{
+ public class c_api_lite
+ {
+ public const string TensorFlowLibName = "tensorflowlite_c";
+
+ public static string StringPiece(IntPtr handle)
+ {
+ return handle == IntPtr.Zero ? String.Empty : Marshal.PtrToStringAnsi(handle);
+ }
+
+ [DllImport(TensorFlowLibName)]
+ public static extern IntPtr TfLiteVersion();
+
+ [DllImport(TensorFlowLibName)]
+ public static extern SafeTfLiteModelHandle TfLiteModelCreateFromFile(string model_path);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TfLiteModelDelete(IntPtr model);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern SafeTfLiteInterpreterOptionsHandle TfLiteInterpreterOptionsCreate();
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TfLiteInterpreterOptionsDelete(IntPtr options);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TfLiteInterpreterOptionsSetNumThreads(SafeTfLiteInterpreterOptionsHandle options, int num_threads);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern SafeTfLiteInterpreterHandle TfLiteInterpreterCreate(SafeTfLiteModelHandle model, SafeTfLiteInterpreterOptionsHandle optional_options);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TfLiteInterpreterDelete(IntPtr interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteInterpreterAllocateTensors(SafeTfLiteInterpreterHandle interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteInterpreterGetInputTensorCount(SafeTfLiteInterpreterHandle interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteInterpreterGetOutputTensorCount(SafeTfLiteInterpreterHandle interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteInterpreterResizeInputTensor(SafeTfLiteInterpreterHandle interpreter,
+ int input_index, int[] input_dims, int input_dims_size);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteTensor TfLiteInterpreterGetInputTensor(SafeTfLiteInterpreterHandle interpreter, int input_index);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteDataType TfLiteTensorType(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteTensorNumDims(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteTensorDim(TfLiteTensor tensor, int dim_index);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteTensorByteSize(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern IntPtr TfLiteTensorData(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern IntPtr TfLiteTensorName(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteQuantizationParams TfLiteTensorQuantizationParams(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteTensorCopyFromBuffer(TfLiteTensor tensor, IntPtr input_data, int input_data_size);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteInterpreterInvoke(SafeTfLiteInterpreterHandle interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern IntPtr TfLiteInterpreterGetOutputTensor(SafeTfLiteInterpreterHandle interpreter, int output_index);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteTensorCopyToBuffer(TfLiteTensor output_tensor, IntPtr output_data, int output_data_size);
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/keras.layers.cs b/src/TensorFlowNET.Core/APIs/keras.layers.cs
deleted file mode 100644
index 92900e767..000000000
--- a/src/TensorFlowNET.Core/APIs/keras.layers.cs
+++ /dev/null
@@ -1,64 +0,0 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using System.Linq;
-using Tensorflow.Keras.Layers;
-
-namespace Tensorflow
-{
- public static partial class keras
- {
- public static class layers
- {
- public static Embedding Embedding(int input_dim, int output_dim,
- IInitializer embeddings_initializer = null,
- bool mask_zero = false) => new Embedding(input_dim, output_dim,
- embeddings_initializer,
- mask_zero);
-
- public static Tensor[] Input(int[] batch_shape = null,
- TF_DataType dtype = TF_DataType.DtInvalid,
- string name = null,
- bool sparse = false,
- Tensor tensor = null)
- {
- var batch_size = batch_shape[0];
- var shape = batch_shape.Skip(1).ToArray();
-
- InputLayer input_layer = null;
- if (batch_shape != null)
- input_layer = new InputLayer(
- batch_input_shape: batch_shape,
- name: name,
- dtype: dtype,
- sparse: sparse,
- input_tensor: tensor);
- else
- input_layer = new InputLayer(
- input_shape: shape,
- batch_size: batch_size,
- name: name,
- dtype: dtype,
- sparse: sparse,
- input_tensor: tensor);
-
- var outputs = input_layer.inbound_nodes[0].output_tensors;
-
- return outputs;
- }
- }
- }
-}
diff --git a/src/TensorFlowNET.Core/APIs/keras.preprocessing.cs b/src/TensorFlowNET.Core/APIs/keras.preprocessing.cs
deleted file mode 100644
index 125b26f73..000000000
--- a/src/TensorFlowNET.Core/APIs/keras.preprocessing.cs
+++ /dev/null
@@ -1,28 +0,0 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using Tensorflow.Keras;
-using Tensorflow.Keras.Engine;
-
-namespace Tensorflow
-{
- public static partial class keras
- {
- public static Preprocessing preprocessing => new Preprocessing();
- public static Sequence sequence = new Sequence();
- public static Sequential Sequential() => new Sequential();
- }
-}
diff --git a/src/TensorFlowNET.Core/APIs/tf.array.cs b/src/TensorFlowNET.Core/APIs/tf.array.cs
index ec17cecc1..b529cd319 100644
--- a/src/TensorFlowNET.Core/APIs/tf.array.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.array.cs
@@ -14,12 +14,12 @@ You may obtain a copy of the License at
limitations under the License.
******************************************************************************/
-using NumSharp;
-using System;
+using Tensorflow.NumPy;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using static Tensorflow.Binding;
+using Tensorflow.Operations;
namespace Tensorflow
{
@@ -29,6 +29,10 @@ public partial class tensorflow
/// A convenient alias for None, useful for indexing arrays.
///
public Slice newaxis = Slice.NewAxis;
+ ///
+ /// A convenient alias for ...
+ ///
+ public Slice ellipsis = Slice.Ellipsis;
///
/// BatchToSpace for N-D tensors of type T.
@@ -40,7 +44,8 @@ public partial class tensorflow
///
///
public Tensor batch_to_space_nd(T input, int[] block_shape, int[,] crops, string name = null)
- => gen_array_ops.batch_to_space_nd(input, block_shape, crops, name: name);
+ => gen_array_ops.batch_to_space_nd(ops.convert_to_tensor(input), ops.convert_to_tensor(block_shape),
+ ops.convert_to_tensor(crops), name: name);
///
/// Apply boolean mask to tensor.
@@ -48,7 +53,7 @@ public Tensor batch_to_space_nd(T input, int[] block_shape, int[,] crops, str
///
///
/// N-D tensor.
- /// K-D boolean tensor, K <= N and K must be known statically.
+ /// K-D boolean tensor, K <= N and K must be known statically.
///
/// A 0-D int Tensor representing the axis in tensor to mask from.
/// (N-K+1)-dimensional tensor populated by entries in tensor corresponding to True values in mask.
@@ -62,7 +67,7 @@ public Tensor boolean_mask(T1 tensor, T2 mask, string name = "boolean_ma
///
///
///
- public Tensor broadcast_to(Tensor input, TensorShape shape, string name = null)
+ public Tensor broadcast_to(Tensor input, Shape shape, string name = null)
=> gen_array_ops.broadcast_to(input, shape, name: name);
public Tensor check_numerics(Tensor tensor, string message, string name = null)
@@ -75,19 +80,18 @@ public Tensor check_numerics(Tensor tensor, string message, string name = null)
///
///
/// A `Tensor` resulting from concatenation of the input tensors.
- public Tensor concat(IList values, int axis, string name = "concat")
+ public Tensor concat(IEnumerable values, int axis, string name = "concat")
{
- if (values.Count == 1)
+ if (values.Count() == 1)
{
return tf_with(ops.name_scope(name), scope =>
{
var tensor = ops.convert_to_tensor(axis, name: "concat_dim", dtype: dtypes.int32);
- Debug.Assert(tensor.TensorShape.ndim == 0);
- return identity(values[0], name: scope);
+ Debug.Assert(tensor.shape.ndim == 0);
+ return identity(values.First(), name: scope);
});
}
-
- return gen_array_ops.concat_v2(values.ToArray(), axis, name: name);
+ return array_ops.concat(values.ToArray(), axis, name: name);
}
///
@@ -96,13 +100,12 @@ public Tensor concat(IList values, int axis, string name = "concat")
///
///
///
- ///
///
/// A `Tensor` with the same data as `input`, but its shape has an additional
/// dimension of size 1 added.
///
- public Tensor expand_dims(Tensor input, int axis = -1, string name = null, int dim = -1)
- => array_ops.expand_dims(input, axis, name, dim);
+ public Tensor expand_dims(Tensor input, int axis = -1, string name = null)
+ => array_ops.expand_dims(input, axis, name);
///
/// Creates a tensor filled with a scalar value.
@@ -112,7 +115,10 @@ public Tensor expand_dims(Tensor input, int axis = -1, string name = null, int d
///
///
public Tensor fill(Tensor dims, T value, string name = null)
- => gen_array_ops.fill(dims, value, name: name);
+ => gen_array_ops.fill(dims, ops.convert_to_tensor(value), name: name);
+
+ public Tensor fill(Shape dims, T value, string name = null)
+ => array_ops.fill(dims, value, name: name);
///
/// Return a tensor with the same shape and contents as input.
@@ -132,7 +138,17 @@ public Tensor identity(Tensor input, string name = null)
///
///
public Tensor gather(Tensor @params, Tensor indices, string name = null, int axis = 0)
- => array_ops.gather(@params, indices, name: name, axis: axis);
+ => array_ops.gather(@params, indices, name: name, axis: ops.convert_to_tensor(axis));
+
+ ///
+ /// Gather slices from `params` into a Tensor with shape specified by `indices`.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor gather_nd(Tensor @params, Tensor indices, string name = null)
+ => gen_array_ops.gather_nd(@params, indices, name: name);
///
/// Return the elements, either from `x` or `y`, depending on the `condition`.
@@ -149,21 +165,24 @@ public Tensor where(Tensor condition, Tx x, Ty y, string name = null)
///
///
///
- public Tensor transpose(T1 a, int[] perm = null, string name = "transpose", bool conjugate = false)
+ public Tensor transpose(T1 a, Axis perm = null, string name = "transpose", bool conjugate = false)
=> array_ops.transpose(a, perm, name, conjugate);
///
/// Reverses specific dimensions of a tensor.
///
///
- ///
+ /// The indices of the dimensions to reverse. Must be in the range [-rank(tensor), rank(tensor)).
///
///
- public Tensor reverse(Tensor tensor, int[] axis, string name = null)
- => gen_array_ops.reverse(tensor, axis, name: name);
-
- public Tensor reverse(Tensor tensor, Tensor axis, string name = null)
- => gen_array_ops.reverse(tensor, axis, name: name);
+ public Tensor reverse(Tensor tensor, Axis axis, string name = null)
+ {
+ if (axis.IsScalar)
+ {
+ axis = new Axis(axis.axis);
+ }
+ return array_ops.reverse(tensor, axis, name: name);
+ }
///
/// Returns the rank of a tensor.
@@ -183,10 +202,14 @@ public Tensor rank(Tensor input, string name = null)
/// A name for the operation (optional).
/// A `Tensor` the same type as `input`.
public Tensor slice(Tensor input, Tb[] begin, Ts[] size, string name = null)
- => array_ops.slice(input, begin, size, name: name);
+ => array_ops.slice(input, begin.Select(x => ops.convert_to_tensor(x)).ToArray(),
+ size.Select(x => ops.convert_to_tensor(x)).ToArray(), name: name);
+
+ public Tensor squeeze(Tensor input, int axis, string name = null, int squeeze_dims = -1)
+ => array_ops.squeeze(input, new[] { axis }, name);
public Tensor squeeze(Tensor input, int[] axis = null, string name = null, int squeeze_dims = -1)
- => gen_array_ops.squeeze(input, axis, name);
+ => array_ops.squeeze(input, axis, name);
///
/// Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
@@ -212,12 +235,15 @@ public Tensor stack(object values, int axis = 0, string name = "stack")
public Tensor ones_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true)
=> array_ops.ones_like(tensor, dtype: dtype, name: name, optimize: optimize);
+ public Tensor ones_like(NDArray nd, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true)
+ => array_ops.ones_like(nd, dtype: dtype, name: name, optimize: optimize);
+
public Tensor one_hot(Tensor indices, int depth,
Tensor on_value = null,
Tensor off_value = null,
TF_DataType dtype = TF_DataType.DtInvalid,
int axis = -1,
- string name = null) => array_ops.one_hot(indices, depth, dtype: dtype, axis: axis, name: name);
+ string name = null) => array_ops.one_hot(indices, ops.convert_to_tensor(depth), dtype: dtype, axis: axis, name: name);
///
/// Pads a tensor
@@ -237,13 +263,13 @@ public Tensor pad(Tensor tensor, Tensor paddings, string mode = "CONSTANT", stri
///
/// A `Tensor`. The default value to produce when output is not fed.
///
- /// A `tf.TensorShape` or list of `int`s. The (possibly partial) shape of
+ /// A `tf.Shape` or list of `int`s. The (possibly partial) shape of
/// the tensor.
///
/// A name for the operation (optional).
/// A `Tensor`. Has the same type as `input`.
public Tensor placeholder_with_default(T input, int[] shape, string name = null)
- => gen_array_ops.placeholder_with_default(input, shape, name: name);
+ => gen_array_ops.placeholder_with_default(ops.convert_to_tensor(input), shape, name: name);
///
/// Returns the shape of a tensor.
@@ -287,6 +313,9 @@ public Tensor[] unstack(Tensor value, int? num = null, int axis = 0, string name
public Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true)
=> array_ops.zeros_like(tensor, dtype: dtype, name: name, optimize: optimize);
+ public Tensor zeros_like(NDArray nd, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true)
+ => array_ops.zeros_like(nd, dtype: dtype, name: name, optimize: optimize);
+
///
/// Stops gradient computation.
///
@@ -295,5 +324,27 @@ public Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvali
///
public Tensor stop_gradient(Tensor x, string name = null)
=> gen_array_ops.stop_gradient(x, name: name);
+
+ public TensorArray TensorArray(TF_DataType dtype, int size = 0, bool dynamic_size = false,
+ bool clear_after_read = true, Shape? element_shape = null, bool colocate_with_first_write_call = true,
+ bool infer_shape = true)
+ => tf.executing_eagerly() ?
+ new _EagerTensorArray(dtype, size: constant_op.constant(size), dynamic_size: dynamic_size,
+ clear_after_read: clear_after_read, element_shape: element_shape, infer_shape: infer_shape,
+ colocate_with_first_write_call: colocate_with_first_write_call) :
+ new _GraphTensorArray(dtype, size: constant_op.constant(size), dynamic_size: dynamic_size,
+ clear_after_read: clear_after_read, element_shape: element_shape, infer_shape: infer_shape,
+ colocate_with_first_write_call: colocate_with_first_write_call);
+
+ public TensorArray TensorArray(TF_DataType dtype, Tensor size, bool dynamic_size = false,
+ bool clear_after_read = true, Shape? element_shape = null, bool colocate_with_first_write_call = true,
+ bool infer_shape = true)
+ => tf.executing_eagerly() ?
+ new _EagerTensorArray(dtype, size: size, dynamic_size: dynamic_size,
+ clear_after_read: clear_after_read, element_shape: element_shape, infer_shape: infer_shape,
+ colocate_with_first_write_call: colocate_with_first_write_call) :
+ new _GraphTensorArray(dtype, size: size, dynamic_size: dynamic_size,
+ clear_after_read: clear_after_read, element_shape: element_shape, infer_shape: infer_shape,
+ colocate_with_first_write_call: colocate_with_first_write_call);
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.audio.cs b/src/TensorFlowNET.Core/APIs/tf.audio.cs
new file mode 100644
index 000000000..573b11ec3
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.audio.cs
@@ -0,0 +1,37 @@
+/*****************************************************************************
+ Copyright 2021 Haiping Chen. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System.Collections.Generic;
+using Tensorflow.IO;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public AudioAPI audio { get; } = new AudioAPI();
+
+ public class AudioAPI
+ {
+ audio_ops audio_ops = new audio_ops();
+
+ public Tensors decode_wav(Tensor contents, int desired_channels = -1, int desired_samples = -1, string name = null)
+ => audio_ops.decode_wav(contents,
+ desired_channels: desired_channels,
+ desired_samples: desired_samples,
+ name: name);
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.autograph.cs b/src/TensorFlowNET.Core/APIs/tf.autograph.cs
new file mode 100644
index 000000000..55acac621
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.autograph.cs
@@ -0,0 +1,25 @@
+/*****************************************************************************
+ Copyright 2020 Haiping Chen. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.Graphs;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public AutoGraph autograph = new AutoGraph();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.bitwise.cs b/src/TensorFlowNET.Core/APIs/tf.bitwise.cs
new file mode 100644
index 000000000..b05182447
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.bitwise.cs
@@ -0,0 +1,25 @@
+/*****************************************************************************
+ Copyright 2020 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.Operations;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public bitwise_ops bitwise = new bitwise_ops();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.compat.cs b/src/TensorFlowNET.Core/APIs/tf.compat.cs
new file mode 100644
index 000000000..8a30badd9
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.compat.cs
@@ -0,0 +1,71 @@
+/*****************************************************************************
+ Copyright 2020 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Google.Protobuf;
+using System.Text;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public CompatApi compat { get; } = new CompatApi();
+
+ public class CompatApi
+ {
+ public CompatV1Api v1 { get; } = new CompatV1Api();
+
+ internal string as_text(string bytes_or_text, Encoding? encoding = null)
+ {
+ if(encoding is null) encoding = Encoding.UTF8;
+ return bytes_or_text;
+ }
+ internal string as_text(byte[] bytes_or_text, Encoding? encoding = null)
+ {
+ if(encoding is null) encoding = Encoding.UTF8;
+ return encoding.GetString(bytes_or_text);
+ }
+
+ internal string as_str(string bytes_or_text, Encoding? encoding = null)
+ {
+ return as_text(bytes_or_text, encoding);
+ }
+ internal string as_str(byte[] bytes_or_text, Encoding? encoding = null)
+ {
+ return as_text(bytes_or_text, encoding);
+ }
+
+ public ByteString as_bytes(ByteString bytes, Encoding encoding = null)
+ {
+ return bytes;
+ }
+ public ByteString as_bytes(byte[] bytes, Encoding encoding = null)
+ {
+ return ByteString.CopyFrom(bytes);
+ }
+ public ByteString as_bytes(string text, Encoding encoding = null)
+ {
+ if(encoding is null)
+ {
+ encoding = Encoding.UTF8;
+ }
+ return ByteString.CopyFrom(encoding.GetBytes(text));
+ }
+ }
+
+ public bool executing_eagerly()
+ => Context.executing_eagerly();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.compat.v1.cs b/src/TensorFlowNET.Core/APIs/tf.compat.v1.cs
new file mode 100644
index 000000000..982e7ccce
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.compat.v1.cs
@@ -0,0 +1,60 @@
+/*****************************************************************************
+ Copyright 2020 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System.Collections.Generic;
+using static Tensorflow.Binding;
+
+namespace Tensorflow
+{
+ public class CompatV1Api
+ {
+ public void disable_eager_execution()
+ => tf.Context.graph_mode();
+
+ public IVariableV1 get_variable(string name,
+ Shape shape = null,
+ TF_DataType dtype = TF_DataType.DtInvalid,
+ object initializer = null, // IInitializer or Tensor
+ bool? trainable = null,
+ List collections = null,
+ bool? use_resource = null,
+ bool validate_shape = true,
+ VariableSynchronization synchronization = VariableSynchronization.Auto,
+ VariableAggregation aggregation = VariableAggregation.None)
+ {
+ var scope = Tensorflow.variable_scope.get_variable_scope();
+ var store = Tensorflow.variable_scope._get_default_variable_store();
+ return scope.get_variable(store,
+ name,
+ shape: shape,
+ dtype: dtype,
+ use_resource: use_resource,
+ validate_shape: validate_shape,
+ initializer: initializer,
+ trainable: trainable,
+ collections: collections);
+ }
+
+ public Operation global_variables_initializer()
+ {
+ var g = variables.global_variables();
+ return variables.variables_initializer(g.ToArray());
+ }
+
+ public Session Session()
+ => new Session().as_default();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.config.cs b/src/TensorFlowNET.Core/APIs/tf.config.cs
new file mode 100644
index 000000000..3c30ffb48
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.config.cs
@@ -0,0 +1,32 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.Contexts;
+using Tensorflow.Framework;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ ///
+ /// Public API for tf.debugging namespace
+ /// https://www.tensorflow.org/api_docs/python/tf/debugging
+ /// More debugging instructions
+ /// https://developer.ibm.com/technologies/artificial-intelligence/tutorials/debug-tensorflow/
+ ///
+ public ConfigImpl config => new ConfigImpl();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.control_flow.cs b/src/TensorFlowNET.Core/APIs/tf.control_flow.cs
index b2b5574ab..cd5a71e50 100644
--- a/src/TensorFlowNET.Core/APIs/tf.control_flow.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.control_flow.cs
@@ -20,12 +20,16 @@ namespace Tensorflow
{
public partial class tensorflow
{
+ public Tensor cond(Tensor pred,
+ Tensor true_value,
+ Tensor false_false)
+ => control_flow_ops.cond(pred, () => true_value, () => false_false);
+
public Tensor cond(Tensor pred,
Func true_fn = null,
Func false_fn = null,
- bool strict = false,
string name = null)
- => control_flow_ops.cond(pred, true_fn, false_fn, strict: strict, name: name);
+ => control_flow_ops.cond(pred, true_fn, false_fn, name: name);
///
/// Create an op that groups multiple operations.
@@ -37,24 +41,33 @@ public Tensor cond(Tensor pred,
public Operation group(T[] inputs, string name = null) where T : ITensorOrOperation
=> control_flow_ops.group(inputs, name: name);
- /*public Tensor while_loop(Func cond, Func body, Tensor[] loop_vars,
- TensorShape shape_invariants = null,
+ public Tensor while_loop(Func cond,
+ Func body,
+ Tensor loop_vars,
+ int parallel_iterations = 10)
+ {
+ Func cond1 = x
+ => cond(x[0]);
+
+ Func body1 = x
+ => new[] { body(x[0]) };
+
+ var results = control_flow_ops.while_loop(cond1,
+ body1,
+ new[] { loop_vars });
+ return results[0];
+ }
+
+ public Tensor[] while_loop(Func cond,
+ Func body,
+ Tensors loop_vars,
int parallel_iterations = 10,
- bool back_prop = true,
- bool swap_memory = false,
- string name = null,
- int? maximum_iterations = null,
- bool return_same_structure = false)
+ string name = null)
=> control_flow_ops.while_loop(cond, body, loop_vars,
- shape_invariants: shape_invariants,
parallel_iterations: parallel_iterations,
- back_prop: back_prop,
- swap_memory: swap_memory,
- name: name,
- maximum_iterations: maximum_iterations,
- return_same_structure: return_same_structure);*/
+ name: name);
- public _ControlDependenciesController control_dependencies(ITensorOrOperation[] control_inputs)
+ public _ControlDependenciesController control_dependencies(ITensorOrOperation[] control_inputs)
=> ops.control_dependencies(control_inputs);
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.data.cs b/src/TensorFlowNET.Core/APIs/tf.data.cs
new file mode 100644
index 000000000..6c41a8393
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.data.cs
@@ -0,0 +1,31 @@
+/*****************************************************************************
+ Copyright 2020 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public DataOps data { get; } = new DataOps();
+
+ public class DataOps
+ {
+ public int AUTOTUNE = -1;
+ public int INFINITE_CARDINALITY = -1;
+ public int UNKNOWN_CARDINALITY = -2;
+ public DatasetManager Dataset { get; } = new DatasetManager();
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.data_flow.cs b/src/TensorFlowNET.Core/APIs/tf.data_flow.cs
index 3ea6a70d0..e4c0a83cc 100644
--- a/src/TensorFlowNET.Core/APIs/tf.data_flow.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.data_flow.cs
@@ -14,8 +14,6 @@ You may obtain a copy of the License at
limitations under the License.
******************************************************************************/
-using System;
-
namespace Tensorflow
{
public partial class tensorflow
diff --git a/src/TensorFlowNET.Core/APIs/tf.debugging.cs b/src/TensorFlowNET.Core/APIs/tf.debugging.cs
index 8e2205948..b3b3529e4 100644
--- a/src/TensorFlowNET.Core/APIs/tf.debugging.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.debugging.cs
@@ -14,31 +14,22 @@ You may obtain a copy of the License at
limitations under the License.
******************************************************************************/
+using Tensorflow.Debugging;
+using static Tensorflow.Binding;
+
namespace Tensorflow
{
public partial class tensorflow
{
///
- /// Assert the condition `x == y` holds element-wise.
+ /// Public API for tf.debugging namespace
+ /// https://www.tensorflow.org/api_docs/python/tf/debugging
+ /// More debugging instructions
+ /// https://developer.ibm.com/technologies/artificial-intelligence/tutorials/debug-tensorflow/
///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- public Tensor assert_equal(T1 t1,
- T2 t2,
- object[] data = null,
- string message = null,
- string name = null)
- => check_ops.assert_equal(t1,
- t2,
- data: data,
- message: message,
- name: name);
+ public DebugImpl debugging => new DebugImpl();
+ public void print(Tensor input)
+ => tf.logging.print_v2(input);
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.exp.cs b/src/TensorFlowNET.Core/APIs/tf.exp.cs
deleted file mode 100644
index 56ea1898e..000000000
--- a/src/TensorFlowNET.Core/APIs/tf.exp.cs
+++ /dev/null
@@ -1,25 +0,0 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-namespace Tensorflow
-{
- public partial class tensorflow
- {
- public Tensor exp(Tensor x,
- string name = null) => gen_math_ops.exp(x, name);
-
- }
-}
diff --git a/src/TensorFlowNET.Core/APIs/tf.gradients.cs b/src/TensorFlowNET.Core/APIs/tf.gradients.cs
index e99c77338..d722cb143 100644
--- a/src/TensorFlowNET.Core/APIs/tf.gradients.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.gradients.cs
@@ -1,5 +1,5 @@
/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+ Copyright 2020 The TensorFlow.NET Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,14 +14,32 @@ You may obtain a copy of the License at
limitations under the License.
******************************************************************************/
+using System.Collections.Generic;
using Tensorflow.Gradients;
namespace Tensorflow
{
public partial class tensorflow
{
- public GradientTape GradientTape()
- => new GradientTape();
+ GradientTape _tapeSet;
+
+ ///
+ /// Record operations for automatic differentiation.
+ ///
+ ///
+ ///
+ /// Tape set
+ public GradientTape GradientTape(bool persistent = false,
+ bool watch_accessed_variables = true)
+ {
+ var tape = _tapeSet.PushTape(persistent: persistent,
+ watch_accessed_variables: watch_accessed_variables);
+ tape.StartRecord();
+ return _tapeSet;
+ }
+
+ public Stack GetTapeSet()
+ => _tapeSet.GetTapeSet();
public Tensor[] gradients(Tensor[] ys,
Tensor[] xs,
@@ -32,11 +50,11 @@ public Tensor[] gradients(Tensor[] ys,
int? aggregation_method = null,
Tensor[] stop_gradients = null)
{
- return gradients_util._GradientsHelper(ys,
- xs,
- grad_ys,
- name,
- colocate_gradients_with_ops,
+ return gradients_util._GradientsHelper(ys,
+ xs,
+ grad_ys,
+ name,
+ colocate_gradients_with_ops,
gate_gradients,
stop_gradients: stop_gradients);
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.graph.cs b/src/TensorFlowNET.Core/APIs/tf.graph.cs
index 05851b6b5..c1b033aee 100644
--- a/src/TensorFlowNET.Core/APIs/tf.graph.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.graph.cs
@@ -20,25 +20,18 @@ namespace Tensorflow
{
public partial class tensorflow
{
- public graph_util_impl graph_util => new graph_util_impl();
- public GraphTransformer graph_transforms => new GraphTransformer();
+ public graph_util_impl graph_util { get; } = new graph_util_impl();
+ public GraphTransformer graph_transforms { get; } = new GraphTransformer();
public GraphKeys GraphKeys { get; } = new GraphKeys();
- public void reset_default_graph()
+ public void reset_default_graph()
=> ops.reset_default_graph();
public Graph get_default_graph()
- {
- return ops.get_default_graph();
- }
+ => ops.get_default_graph();
- ///
- /// Equivalent to but does not create a new graph if it there is none.
- ///
public Graph peak_default_graph()
- {
- return ops.default_graph_stack.peak_controller();
- }
+ => ops.peak_default_graph();
///
/// Creates a new graph.
diff --git a/src/TensorFlowNET.Core/APIs/tf.image.cs b/src/TensorFlowNET.Core/APIs/tf.image.cs
index 13fd678fe..41ef52967 100644
--- a/src/TensorFlowNET.Core/APIs/tf.image.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.image.cs
@@ -1,4 +1,4 @@
-/*****************************************************************************
+/*****************************************************************************
Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,8 +14,11 @@ You may obtain a copy of the License at
limitations under the License.
******************************************************************************/
-using System.Collections.Generic;
-using Tensorflow.IO;
+using OneOf.Types;
+using System;
+using System.Buffers.Text;
+using Tensorflow.Contexts;
+using static Tensorflow.Binding;
namespace Tensorflow
{
@@ -25,17 +28,246 @@ public partial class tensorflow
public class image_internal
{
- public Tensor decode_jpeg(Tensor contents,
- int channels = 0,
- int ratio = 1,
- bool fancy_upscaling = true,
- bool try_recover_truncated = false,
- float acceptable_fraction = 1,
- string dct_method = "",
+ public Tensor random_flip_up_down(Tensor image, int seed = 0)
+ => image_ops_impl.random_flip_up_down(image, seed);
+
+ public Tensor random_flip_left_right(Tensor image, int seed = 0)
+ => image_ops_impl.random_flip_left_right(image, seed);
+
+ public Tensor flip_left_right(Tensor image)
+ => image_ops_impl.flip_left_right(image);
+
+ public Tensor flip_up_down(Tensor image)
+ => image_ops_impl.flip_up_down(image);
+
+ public Tensor rot90(Tensor image, int k = 1, string name = null)
+ => image_ops_impl.rot90(image, k, name);
+
+ public Tensor transpose(Tensor image, string name = null)
+ => image_ops_impl.transpose(image, name);
+
+ public Tensor central_crop(Tensor image, float central_fraction)
+ => image_ops_impl.central_crop(image, central_fraction);
+
+ public Tensor pad_to_bounding_box(Tensor image, int offset_height, int offset_width, int target_height, int target_width)
+ => image_ops_impl.pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width);
+
+ public Tensor crop_to_bounding_box(Tensor image, int offset_height, int offset_width, int target_height, int target_width)
+ => image_ops_impl.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width);
+
+ public Tensor resize_image_with_crop_or_pad(Tensor image, object target_height, object target_width)
+ => image_ops_impl.resize_image_with_crop_or_pad(image, target_height, target_width);
+
+ public Tensor resize_images(Tensor images, Tensor size, string method = ResizeMethod.BILINEAR, bool preserve_aspect_ratio = false, bool antialias = false,
+ string name = null)
+ => image_ops_impl.resize_images(images, size, method, preserve_aspect_ratio, antialias, name);
+
+ public Tensor resize_images_v2(Tensor images, Shape size, string method = ResizeMethod.BILINEAR, bool preserve_aspect_ratio = false, bool antialias = false,
+ string name = null)
+ => image_ops_impl.resize_images_v2(images, size, method, preserve_aspect_ratio, antialias, name);
+
+ public Tensor resize_images_v2(Tensor images, Tensor size, string method = ResizeMethod.BILINEAR, bool preserve_aspect_ratio = false, bool antialias = false,
+ string name = null)
+ => image_ops_impl.resize_images_v2(images, size, method, preserve_aspect_ratio, antialias, name);
+
+ public Tensor resize_images_with_pad(Tensor image, int target_height, int target_width, string method, bool antialias)
+ => image_ops_impl.resize_images_with_pad(image, target_height, target_width, method, antialias);
+
+ public Tensor per_image_standardization(Tensor image)
+ => image_ops_impl.per_image_standardization(image);
+
+ public Tensor random_brightness(Tensor image, float max_delta, int seed = 0)
+ => image_ops_impl.random_brightness(image, max_delta, seed);
+
+ public Tensor random_contrast(Tensor image, float lower, float upper, int seed = 0)
+ => image_ops_impl.random_contrast(image, lower, upper, seed);
+
+ public Tensor adjust_brightness(Tensor image, Tensor delta)
+ => image_ops_impl.adjust_brightness(image, delta);
+
+ public Tensor adjust_contrast(Tensor images, Tensor contrast_factor)
+ => image_ops_impl.adjust_contrast(images, contrast_factor);
+
+ public Tensor adjust_gamma(Tensor image, int gamma = 1, int gain = 1)
+ => image_ops_impl.adjust_gamma(image, gamma, gain);
+
+ public Tensor rgb_to_grayscale(Tensor images, string name = null)
+ => image_ops_impl.rgb_to_grayscale(images, name);
+
+ public Tensor grayscale_to_rgb(Tensor images, string name = null)
+ => image_ops_impl.grayscale_to_rgb(images, name);
+
+ public Tensor random_hue(Tensor image, float max_delta, int seed = 0)
+ => image_ops_impl.random_hue(image, max_delta, seed);
+
+ public Tensor adjust_hue(Tensor image, Tensor delta, string name = null)
+ => image_ops_impl.adjust_hue(image, delta, name);
+
+ public Tensor random_jpeg_quality(Tensor image, float min_jpeg_quality, float max_jpeg_quality, int seed = 0)
+ => image_ops_impl.random_jpeg_quality(image, min_jpeg_quality, max_jpeg_quality, seed);
+
+ public Tensor adjust_jpeg_quality(Tensor image, Tensor jpeg_quality, string name = null)
+ => image_ops_impl.adjust_jpeg_quality(image, jpeg_quality, name);
+
+ public Tensor random_saturation(Tensor image, float lower, float upper, int seed = 0)
+ => image_ops_impl.random_saturation(image, lower, upper, seed);
+
+ public Tensor adjust_saturation(Tensor image, Tensor saturation_factor, string name = null)
+ => image_ops_impl.adjust_saturation(image, saturation_factor, name);
+
+ public Tensor total_variation(Tensor images, string name = null)
+ => image_ops_impl.total_variation(images, name);
+
+ public (Tensor, Tensor, Tensor) sample_distorted_bounding_box(Tensor image_size, Tensor bounding_boxes,
+ int seed = 0,
+ Tensor min_object_covered = null,
+ float[] aspect_ratio_range = null,
+ float[] area_range = null,
+ int max_attempts = 100,
+ bool use_image_if_no_bounding_boxes = false,
string name = null)
- => gen_image_ops.decode_jpeg(contents, channels: channels, ratio: ratio,
- fancy_upscaling: fancy_upscaling, try_recover_truncated: try_recover_truncated,
- acceptable_fraction: acceptable_fraction, dct_method: dct_method);
+ => image_ops_impl.sample_distorted_bounding_box_v2(image_size, bounding_boxes, seed, min_object_covered, aspect_ratio_range,
+ area_range, max_attempts, use_image_if_no_bounding_boxes, name);
+
+ public Tensor non_max_suppression(Tensor boxes, Tensor scores, Tensor max_output_size, float iou_threshold = 0.5f,
+ float score_threshold = -1f / 0f, /*float soft_nms_sigma = 0.0f,*/ string name = null)
+ => image_ops_impl.non_max_suppression(boxes, scores, max_output_size, iou_threshold, score_threshold, name);
+
+ public Tensor non_max_suppression_with_overlaps(Tensor overlaps, Tensor scores, Tensor max_output_size,
+ float overlap_threshold = 0.5f, float score_threshold = -1 / 0f, string name = null)
+ => image_ops_impl.non_max_suppression_with_overlaps(overlaps, scores, max_output_size, overlap_threshold, score_threshold, name);
+
+ public Tensor rgb_to_yiq(Tensor images)
+ => image_ops_impl.rgb_to_yiq(images);
+
+ public Tensor yiq_to_rgb(Tensor images)
+ => image_ops_impl.yiq_to_rgb(images);
+
+ public Tensor rgb_to_yuv(Tensor images)
+ => image_ops_impl.rgb_to_yuv(images);
+
+ public Tensor yuv_to_rgb(Tensor images)
+ => image_ops_impl.yuv_to_rgb(images);
+
+ public Tensor psnr(Tensor a, Tensor b, Tensor max_val, string name = null)
+ => image_ops_impl.psnr(a, b, max_val, name);
+
+ public Tensor ssim(Tensor img1, Tensor img2, float max_val = 1f, float filter_size = 11f, float filter_sigma = 1.5f,
+ float k1 = 0.01f, float k2 = 0.03f)
+ => image_ops_impl.ssim(img1, img2, max_val, filter_size, filter_sigma, k1, k2);
+
+ public Tensor ssim_multiscale(Tensor img1, Tensor img2, float max_val, float[] power_factors = null, float filter_size = 11f,
+ float filter_sigma = 1.5f, float k1 = 0.01f, float k2 = 0.03f)
+ => image_ops_impl.ssim_multiscale(img1, img2, max_val, power_factors, filter_size, filter_sigma, k1, k2);
+
+ public (Tensor, Tensor) image_gradients(Tensor image)
+ => image_ops_impl.image_gradients(image);
+
+ public Tensor sobel_edges(Tensor image)
+ => image_ops_impl.sobel_edges(image);
+
+ ///
+ /// Adjust contrast of RGB or grayscale images.
+ ///
+ /// Images to adjust. At least 3-D.
+ ///
+ /// A float multiplier for adjusting contrast.
+ /// The contrast-adjusted image or images.
+ public Tensor adjust_contrast(Tensor images, float contrast_factor, string name = null)
+ => gen_image_ops.adjust_contrastv2(images, contrast_factor, name);
+
+ ///
+ /// Adjust hue of RGB images.
+ ///
+ /// RGB image or images. The size of the last dimension must be 3.
+ /// float. How much to add to the hue channel.
+ /// A name for this operation (optional).
+ /// Adjusted image(s), same shape and DType as `image`.
+ /// if `delta` is not in the interval of `[-1, 1]`.
+ public Tensor adjust_hue(Tensor images, float delta, string name = null)
+ {
+ if (tf.Context.executing_eagerly())
+ {
+ if (delta < -1f || delta > 1f)
+ throw new ValueError("delta must be in the interval [-1, 1]");
+ }
+ return gen_image_ops.adjust_hue(images, delta, name: name);
+ }
+
+ ///
+ /// Adjust saturation of RGB images.
+ ///
+ /// RGB image or images. The size of the last dimension must be 3.
+ /// float. Factor to multiply the saturation by.
+ /// A name for this operation (optional).
+ /// Adjusted image(s), same shape and DType as `image`.
+ public Tensor adjust_saturation(Tensor image, float saturation_factor, string name = null)
+ => gen_image_ops.adjust_saturation(image, saturation_factor, name);
+
+ ///
+ /// Greedily selects a subset of bounding boxes in descending order of score.
+ ///
+ ///
+ /// A 4-D float `Tensor` of shape `[batch_size, num_boxes, q, 4]`. If `q`
+ /// is 1 then same boxes are used for all classes otherwise, if `q` is equal
+ /// to number of classes, class-specific boxes are used.
+ ///
+ ///
+ /// A 3-D float `Tensor` of shape `[batch_size, num_boxes, num_classes]`
+ /// representing a single score corresponding to each box(each row of boxes).
+ ///
+ ///
+ /// A scalar integer `Tensor` representing the
+ /// maximum number of boxes to be selected by non-max suppression per class
+ ///
+ ///
+ /// A int32 scalar representing maximum number of boxes retained
+ /// over all classes.Note that setting this value to a large number may
+ /// result in OOM error depending on the system workload.
+ ///
+ ///
+ /// A float representing the threshold for deciding whether boxes
+ /// overlap too much with respect to IOU.
+ ///
+ ///
+ /// A float representing the threshold for deciding when to
+ /// remove boxes based on score.
+ ///
+ ///
+ /// If false, the output nmsed boxes, scores and classes are
+ /// padded/clipped to `max_total_size`. If true, the output nmsed boxes, scores and classes are padded to be of length `max_size_per_class`*`num_classes`,
+ /// unless it exceeds `max_total_size` in which case it is clipped to `max_total_size`. Defaults to false.
+ ///
+ ///
+ /// If true, the coordinates of output nmsed boxes will be clipped
+ /// to[0, 1]. If false, output the box coordinates as it is. Defaults to true.
+ ///
+ ///
+ /// 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor containing the non-max suppressed boxes.
+ /// 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing the scores for the boxes.
+ /// 'nmsed_classes': A [batch_size, max_detections] float32 tensor containing the class for boxes.
+ /// 'valid_detections': A [batch_size] int32 tensor indicating the number of
+ /// valid detections per batch item. Only the top valid_detections[i] entries
+ /// in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the
+ /// entries are zero paddings.
+ ///
+ public (Tensor, Tensor, Tensor, Tensor) combined_non_max_suppression(
+ Tensor boxes,
+ Tensor scores,
+ int max_output_size_per_class,
+ int max_total_size,
+ float iou_threshold,
+ float score_threshold,
+ bool pad_per_class = false,
+ bool clip_boxes = true)
+ {
+ var iou_threshold_t = ops.convert_to_tensor(iou_threshold, TF_DataType.TF_FLOAT, name: "iou_threshold");
+ var score_threshold_t = ops.convert_to_tensor(score_threshold, TF_DataType.TF_FLOAT, name: "score_threshold");
+ var max_total_size_t = ops.convert_to_tensor(max_total_size);
+ var max_output_size_per_class_t = ops.convert_to_tensor(max_output_size_per_class);
+ return gen_image_ops.combined_non_max_suppression(boxes, scores, max_output_size_per_class_t, max_total_size_t,
+ iou_threshold_t, score_threshold_t, pad_per_class, clip_boxes);
+ }
///
/// Extracts crops from the input image tensor and resizes them using bilinear sampling or nearest neighbor sampling (possibly with aspect ratio change) to a common output size specified by crop_size. This is more general than the crop_to_bounding_box op which extracts a fixed size slice from the input image and does not allow resizing or aspect ratio change.
@@ -50,16 +282,54 @@ public Tensor decode_jpeg(Tensor contents,
/// A name for the operation (optional).
/// A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth].
public Tensor crop_and_resize(Tensor image, Tensor boxes, Tensor box_ind, Tensor crop_size, string method = "bilinear", float extrapolation_value = 0f, string name = null) =>
- image_ops_impl.crop_and_resize(image, boxes, box_ind, crop_size, method, extrapolation_value, name);
+ gen_image_ops.crop_and_resize(image, boxes, box_ind, crop_size, method, extrapolation_value, name);
+ public Tensor decode_jpeg(Tensor contents,
+ int channels = 0,
+ int ratio = 1,
+ bool fancy_upscaling = true,
+ bool try_recover_truncated = false,
+ int acceptable_fraction = 1,
+ string dct_method = "",
+ string name = null)
+ => gen_image_ops.decode_jpeg(contents, channels: channels, ratio: ratio,
+ fancy_upscaling: fancy_upscaling, try_recover_truncated: try_recover_truncated,
+ acceptable_fraction: acceptable_fraction, dct_method: dct_method);
- public Tensor resize_bilinear(Tensor images, Tensor size, bool align_corners = false, string name = null)
- => gen_image_ops.resize_bilinear(images, size, align_corners: align_corners, name: name);
+ public Tensor extract_glimpse(Tensor input, Tensor size, Tensor offsets, bool centered = true, bool normalized = true,
+ bool uniform_noise = true, string name = null)
+ => image_ops_impl.extract_glimpse(input, size, offsets, centered, normalized, uniform_noise, name);
- public Tensor resize_images(Tensor images, Tensor size, ResizeMethod method = ResizeMethod.BILINEAR,
- bool align_corners = false, bool preserve_aspect_ratio = false, string name = null)
+ public (Tensor, Tensor, Tensor, Tensor) combined_non_max_suppression(Tensor boxes, Tensor scores, Tensor max_output_size_per_class,
+ Tensor max_total_size, float iou_threshold = 0.5f, float score_threshold = -1f / 0f, bool pad_per_class = false, bool clip_boxes = true,
+ string name = null)
+ => image_ops_impl.combined_non_max_suppression(boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold,
+ pad_per_class, clip_boxes, name);
+
+ public (Tensor, Tensor) non_max_suppression_padded(Tensor boxes, Tensor scores, Tensor max_output_size,
+ float iou_threshold = 0.5f,
+ float score_threshold = -1f / 0f,
+ bool pad_to_max_output_size = false,
+ string name = null,
+ bool sorted_input = false,
+ bool canonicalized_coordinates = false,
+ int tile_size = 512)
+ => image_ops_impl.non_max_suppression_padded(boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size,
+ name, sorted_input, canonicalized_coordinates, tile_size);
+
+ public Tensor resize(Tensor image, Shape size, string method = ResizeMethod.BILINEAR)
+ => image_ops_impl.resize_images_v2(image, size, method: method);
+
+ public Tensor resize(Tensor image, Tensor size, string method = ResizeMethod.BILINEAR)
+ => image_ops_impl.resize_images_v2(image, size, method: method);
+
+ public Tensor resize_bilinear(Tensor images, Tensor size, bool align_corners = false, bool half_pixel_centers = false, string name = null)
+ => gen_image_ops.resize_bilinear(images, size, align_corners: align_corners, half_pixel_centers: half_pixel_centers, name: name);
+
+ public Tensor resize_images(Tensor images, Tensor size, string method = ResizeMethod.BILINEAR,
+ bool preserve_aspect_ratio = false, string name = null)
=> image_ops_impl.resize_images(images, size, method: method,
- align_corners: align_corners, preserve_aspect_ratio: preserve_aspect_ratio, name: name);
+ preserve_aspect_ratio: preserve_aspect_ratio, name: name);
public Tensor convert_image_dtype(Tensor image, TF_DataType dtype, bool saturate = false, string name = null)
=> gen_image_ops.convert_image_dtype(image, dtype, saturate: saturate, name: name);
@@ -69,6 +339,13 @@ public Tensor decode_image(Tensor contents, int channels = 0, TF_DataType dtype
=> image_ops_impl.decode_image(contents, channels: channels, dtype: dtype,
name: name, expand_animations: expand_animations);
+ public Tensor encode_png(Tensor contents, string name = null)
+ => image_ops_impl.encode_png(contents, name: name);
+
+ public Tensor encode_jpeg(Tensor contents, string name = null)
+ => image_ops_impl.encode_jpeg(contents, name: name);
+
+
///
/// Convenience function to check if the 'contents' encodes a JPEG image.
///
@@ -91,6 +368,9 @@ public Tensor resize_nearest_neighbor(Tensor images, Tsize size, bool ali
string name = null, bool half_pixel_centers = false)
=> image_ops_impl.resize_nearest_neighbor(images, size, align_corners: align_corners,
name: name, half_pixel_centers: half_pixel_centers);
+
+ public Tensor draw_bounding_boxes(Tensor images, Tensor boxes, Tensor colors = null, string name = null)
+ => image_ops_impl.draw_bounding_boxes(images, boxes, colors, name);
}
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.init.cs b/src/TensorFlowNET.Core/APIs/tf.init.cs
index db2ea1b15..8635f6620 100644
--- a/src/TensorFlowNET.Core/APIs/tf.init.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.init.cs
@@ -20,12 +20,15 @@ namespace Tensorflow
{
public partial class tensorflow
{
- public IInitializer constant_initializer(T value, TF_DataType dtype = TF_DataType.TF_FLOAT, bool verify_shape = false)
+ public InitializersImpl initializers { get; } = new InitializersImpl();
+
+ public IInitializer constant_initializer(T value, TF_DataType dtype = TF_DataType.TF_FLOAT, bool verify_shape = false)
=> new Constant(value, dtype: dtype, verify_shape: verify_shape);
public IInitializer zeros_initializer => new Zeros();
public IInitializer ones_initializer => new Ones();
public IInitializer glorot_uniform_initializer => new GlorotUniform();
- public IInitializer uniform_initializer => new RandomUniform();
+ public IInitializer random_uniform_initializer => new RandomUniform();
+ public IInitializer orthogonal_initializer => new Orthogonal();
public variable_scope variable_scope(string name,
string default_name = null,
@@ -68,19 +71,34 @@ public IInitializer random_normal_initializer(float mean = 0.0f,
///
///
///
- ///
+ ///
///
///
///
public IInitializer variance_scaling_initializer(float factor = 1.0f,
- string mode = "FAN_IN",
- bool uniform = false,
+ string mode = "fan_in",
+ string distribution = "truncated_normal",
int? seed = null,
TF_DataType dtype = TF_DataType.TF_FLOAT) => new VarianceScaling(
- factor: factor,
+ scale: factor,
mode: mode,
- uniform: uniform,
+ distribution: distribution,
seed: seed,
dtype: dtype);
+
+ public class InitializersImpl
+ {
+ public IInitializer random_normal_initializer(float mean = 0.0f,
+ float stddev = 0.05f,
+ int? seed = null,
+ TF_DataType dtype = TF_DataType.TF_FLOAT) => new RandomNormal(mean: mean,
+ stddev: stddev,
+ seed: seed,
+ dtype: dtype);
+
+ public IInitializer zeros_initializer(Shape shape = null,
+ TF_DataType dtype = TF_DataType.TF_FLOAT) => new Zeros(shape: shape,
+ dtype: dtype);
+ }
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.io.cs b/src/TensorFlowNET.Core/APIs/tf.io.cs
index 40da04b13..ea1e44b28 100644
--- a/src/TensorFlowNET.Core/APIs/tf.io.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.io.cs
@@ -16,19 +16,51 @@ limitations under the License.
using System.Collections.Generic;
using Tensorflow.IO;
+using Tensorflow.Operations;
namespace Tensorflow
{
public partial class tensorflow
{
+ public IoApi io { get; } = new IoApi();
+
+ public class IoApi
+ {
+ io_ops ops;
+ public GFile gfile;
+ public IoApi()
+ {
+ ops = new io_ops();
+ gfile = new GFile();
+ }
+
+ public Tensor read_file(string filename, string name = null)
+ => ops.read_file(filename, name);
+
+ public Tensor read_file(Tensor filename, string name = null)
+ => ops.read_file(filename, name);
+
+ public Operation save_v2(Tensor prefix, string[] tensor_names,
+ string[] shape_and_slices, Tensor[] tensors, string name = null)
+ => ops.save_v2(prefix, tensor_names, shape_and_slices, tensors, name: name);
+
+ public Tensor[] restore_v2(Tensor prefix, string[] tensor_names,
+ string[] shape_and_slices, TF_DataType[] dtypes, string name = null)
+ => ops.restore_v2(prefix, tensor_names, shape_and_slices, dtypes, name: name);
+
+ public Operation write_file(string filename, Tensor contents, string name = null)
+ => write_file(Tensorflow.ops.convert_to_tensor(filename, TF_DataType.TF_STRING), contents, name);
+
+ public Operation write_file(Tensor filename, Tensor contents, string name = null)
+ => gen_ops.write_file(filename, contents, name);
+ }
+
public GFile gfile = new GFile();
- public Tensor read_file(string filename, string name = null) => gen_io_ops.read_file(filename, name);
- public Tensor read_file(Tensor filename, string name = null) => gen_io_ops.read_file(filename, name);
public ITensorOrOperation[] import_graph_def(GraphDef graph_def,
Dictionary input_map = null,
string[] return_elements = null,
string name = null,
- OpList producer_op_list = null) => importer.import_graph_def(graph_def, input_map, return_elements, name, producer_op_list);
+ OpList producer_op_list = null) => importer.import_graph_def(graph_def, input_map, return_elements, name: name, producer_op_list: producer_op_list);
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.layers.cs b/src/TensorFlowNET.Core/APIs/tf.layers.cs
deleted file mode 100644
index e62d5fa25..000000000
--- a/src/TensorFlowNET.Core/APIs/tf.layers.cs
+++ /dev/null
@@ -1,236 +0,0 @@
-/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-******************************************************************************/
-
-using System.Collections.Generic;
-using System.Linq;
-using NumSharp;
-using Tensorflow.Keras.Layers;
-using Tensorflow.Operations.Activation;
-using static Tensorflow.Binding;
-
-namespace Tensorflow
-{
- public partial class tensorflow
- {
- public layers_internal layers { get; } = new layers_internal();
-
- public class layers_internal
- {
- public Tensor conv2d(Tensor inputs,
- int filters,
- int[] kernel_size,
- int[] strides = null,
- string padding = "valid",
- string data_format= "channels_last",
- int[] dilation_rate = null,
- bool use_bias = true,
- IActivation activation = null,
- IInitializer kernel_initializer = null,
- IInitializer bias_initializer = null,
- bool trainable = true,
- string name = null)
- {
- if (strides == null)
- strides = new int[] { 1, 1 };
- if (dilation_rate == null)
- dilation_rate = new int[] { 1, 1 };
- if (bias_initializer == null)
- bias_initializer = tf.zeros_initializer;
-
- var layer = new Conv2D(filters,
- kernel_size: kernel_size,
- strides: strides,
- padding: padding,
- data_format: data_format,
- dilation_rate: dilation_rate,
- activation: activation,
- use_bias: use_bias,
- kernel_initializer: kernel_initializer,
- bias_initializer: bias_initializer,
- trainable: trainable,
- name: name);
-
- return layer.apply(inputs).Item1;
- }
-
- ///
- /// Functional interface for the batch normalization layer.
- /// http://arxiv.org/abs/1502.03167
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- ///
- public Tensor batch_normalization(Tensor inputs,
- int axis = -1,
- float momentum = 0.99f,
- float epsilon = 0.001f,
- bool center = true,
- bool scale = true,
- IInitializer beta_initializer = null,
- IInitializer gamma_initializer = null,
- IInitializer moving_mean_initializer = null,
- IInitializer moving_variance_initializer = null,
- Tensor training = null,
- bool trainable = true,
- string name = null,
- bool renorm = false,
- float renorm_momentum = 0.99f)
- {
- var layer = new BatchNormalization(
- axis: axis,
- momentum: momentum,
- epsilon: epsilon,
- center: center,
- scale: scale,
- beta_initializer: beta_initializer,
- gamma_initializer: gamma_initializer,
- moving_mean_initializer: moving_mean_initializer,
- moving_variance_initializer: moving_variance_initializer,
- renorm: renorm,
- renorm_momentum: renorm_momentum,
- trainable: trainable,
- name: name);
-
- return layer.apply(inputs, training: training).Item1;
- }
-
- ///
- /// Max pooling layer for 2D inputs (e.g. images).
- ///
- /// The tensor over which to pool. Must have rank 4.
- ///
- ///
- ///
- ///
- ///
- ///
- public Tensor max_pooling2d(Tensor inputs,
- int[] pool_size,
- int[] strides,
- string padding = "valid",
- string data_format = "channels_last",
- string name = null)
- {
- var layer = new MaxPooling2D(pool_size: pool_size,
- strides: strides,
- padding: padding,
- data_format: data_format,
- name: name);
-
- return layer.apply(inputs).Item1;
- }
-
- ///
- /// Densely-connected layer class. aka fully-connected
- /// `outputs = activation(inputs * kernel + bias)`
- ///
- ///
- /// Python integer, dimensionality of the output space.
- ///
- /// Boolean, whether the layer uses a bias.
- ///
- ///
- ///
- ///
- ///
- ///
- public Tensor dense(Tensor inputs,
- int units,
- IActivation activation = null,
- bool use_bias = true,
- IInitializer kernel_initializer = null,
- IInitializer bias_initializer = null,
- bool trainable = true,
- string name = null,
- bool? reuse = null)
- {
- if (bias_initializer == null)
- bias_initializer = tf.zeros_initializer;
-
- var layer = new Dense(units, activation,
- use_bias: use_bias,
- bias_initializer: bias_initializer,
- kernel_initializer: kernel_initializer,
- trainable: trainable,
- name: name);
-
- return layer.apply(inputs).Item1;
- }
-
- ///
- /// Flattens an input tensor while preserving the batch axis (axis 0).
- ///
- /// Tensor input.
- /// The name of the layer.
- ///
- /// A string, one of `channels_last` (default) or `channels_first`.
- /// The ordering of the dimensions in the inputs.
- /// `channels_last` corresponds to inputs with shape
- /// `(batch, height, width, channels)` while `channels_first` corresponds to
- /// inputs with shape `(batch, channels, height, width)`.
- ///
- ///
- public Tensor flatten(Tensor inputs,
- string name = null,
- string data_format = "channels_last")
- {
- var input_shape = inputs.shape;
- if (inputs.shape.Length == 0)
- throw new ValueError($"Input 0 of layer flatten is incompatible with the layer: : expected min_ndim={1}, found ndim={0}. Full shape received: ()");
-
- var premutation = new List() {0};
- if (data_format == "channels_first" && inputs.NDims > 1)
- {
- premutation.AddRange(Binding.range(2, inputs.NDims));
- premutation.Add(1);
- inputs = array_ops.transpose(inputs, premutation.ToArray());
- }
-
- var ret = array_ops.reshape(inputs, compute_output_shape(input_shape));
- //ret.set_shape(compute_output_shape(ret.shape));
- return ret;
-
- int[] compute_output_shape(int[] inputshape)
- {
- if (inputshape == null || inputshape.Length == 0)
- inputshape = new int[] {1};
-
- if (inputshape.Skip(1).All(d => d > 0))
- {
- int[] output_shape = new int[2];
- output_shape[0] = inputshape[0];
- output_shape[1] = inputshape.Skip(1).Aggregate(1, (acc, rhs) => acc*rhs); //calculate size of all the rest dimensions
- return output_shape;
- } else
- return new int[] {inputshape[0], -1}; //-1 == Binding.None
- }
- }
- }
- }
-}
diff --git a/src/TensorFlowNET.Core/APIs/tf.linalg.cs b/src/TensorFlowNET.Core/APIs/tf.linalg.cs
index 398fd5087..32f64ec35 100644
--- a/src/TensorFlowNET.Core/APIs/tf.linalg.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.linalg.cs
@@ -13,18 +13,99 @@ You may obtain a copy of the License at
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/
+using Tensorflow.NumPy;
+using static Tensorflow.Binding;
namespace Tensorflow
{
public partial class tensorflow
{
+ public LinalgApi linalg { get; } = new LinalgApi();
+
+ public class LinalgApi
+ {
+ linalg_ops ops = new linalg_ops();
+
+ public Tensor einsum(string equation, Tensors inputs, string name = null)
+ => math_ops.einsum(equation, inputs, name: name);
+
+ public Tensor eye(int num_rows,
+ int num_columns = -1,
+ Shape batch_shape = null,
+ TF_DataType dtype = TF_DataType.TF_DOUBLE,
+ string name = null)
+ => ops.eye(num_rows, num_columns: num_columns, batch_shape: batch_shape, dtype: dtype, name: name);
+
+ public Tensor diag(Tensor diagonal, string name = null)
+ => gen_array_ops.diag(diagonal, name: name);
+
+ public Tensor matmul(Tensor a, Tensor b)
+ => math_ops.matmul(a, b);
+
+ public Tensor norm(Tensor a, string ord = "euclidean", Axis axis = null, string name = null)
+ => ops.norm(a, ord: ord, axis: axis, name: name);
+
+ public Tensor batch_matmul(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string name = null)
+ => math_ops.batch_matmul(x, y, adj_x: adj_x, adj_y: adj_y, name: name);
+
+ public Tensor inv(Tensor input, bool adjoint = false, string name = null)
+ => ops.matrix_inverse(input, adjoint: adjoint, name: name);
+
+ public Tensor global_norm(Tensor[] t_list, string name = null)
+ => clip_ops.global_norm(t_list, name: name);
+
+ public Tensor l2_normalize(Tensor x,
+ int axis = 0,
+ float epsilon = 1e-12f,
+ string name = null)
+ => nn_impl.l2_normalize(x, axis: axis, epsilon: constant_op.constant(epsilon), name: name);
+
+ public Tensor lstsq(Tensor matrix, Tensor rhs,
+ NDArray l2_regularizer = null, bool fast = true, string name = null)
+ => ops.matrix_solve_ls(matrix, rhs, l2_regularizer: l2_regularizer, fast: fast, name: name);
+
+ public Tensors qr(Tensor input, bool full_matrices = true, string name = null)
+ => ops.qr(input, full_matrices: full_matrices, name: name);
+
+ public Tensor tensor_diag_part(Tensor input, string name = null)
+ => gen_array_ops.diag_part(input, name: name);
+
+ public Tensor tensordot(Tensor x, Tensor y, NDArray axes, string name = null)
+ => math_ops.tensordot(x, y, axes, name: name);
+ }
+
public Tensor diag(Tensor diagonal, string name = null)
=> gen_array_ops.diag(diagonal, name: name);
- public Tensor matmul(Tensor a, Tensor b)
- => math_ops.matmul(a, b);
+ public Tensor matmul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false)
+ => math_ops.matmul(a, b, transpose_a: transpose_a, transpose_b: transpose_b);
- public Tensor batch_matmul(Tensor x, Tensor y)
- => gen_math_ops.batch_mat_mul(x, y);
+ ///
+ /// Multiply slices of the two matrices "x" and "y".
+ ///
+ ///
+ /// The `BatchMatMul` operation is embedded into the
+ /// `MatMul` operation on the DLL side. However the expected
+ /// attributes are not the same, hence we need to expose this
+ /// method to have the right args list on the `_apply_op_helper`
+ /// function.
+ ///
+ /// For each rank > 2 the first rank - 2 dimensions are considered
+ /// as fixed, and have to be consistent across the two matrices. A
+ /// common matrix multiplication is then applied over the residual
+ /// 2 dimensions.
+ ///
+ /// e.g.
+ /// x is (3, 6, 12); y is (3, 12, 6)
+ /// batch_matmul(x, y) ==> (3, 6, 6)
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor batch_matmul(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string name = null)
+ => math_ops.batch_matmul(x, y, adj_x: adj_x, adj_y: adj_y, name: name);
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.logging.cs b/src/TensorFlowNET.Core/APIs/tf.logging.cs
new file mode 100644
index 000000000..0e10c1610
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.logging.cs
@@ -0,0 +1,23 @@
+/*****************************************************************************
+ Copyright 2021 Haiping Chen. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public logging_ops logging => new logging_ops();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs
index 66e1ba00b..da54a9dd7 100644
--- a/src/TensorFlowNET.Core/APIs/tf.math.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.math.cs
@@ -1,5 +1,5 @@
/*****************************************************************************
- Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+ Copyright 2023 The TensorFlow.NET Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,13 +14,108 @@ You may obtain a copy of the License at
limitations under the License.
******************************************************************************/
-using Tensorflow.Eager;
+using Tensorflow.NumPy;
using Tensorflow.Operations;
namespace Tensorflow
{
public partial class tensorflow
{
+ public MathApi math { get; } = new MathApi();
+ public class MathApi
+ {
+ public Tensor argmax(Tensor input, Axis axis = null, string name = null, int? dimension = null, TF_DataType output_type = TF_DataType.TF_INT64)
+ => gen_math_ops.arg_max(input, axis, name: name, output_type: output_type);
+
+ public Tensor count_nonzero(Tensor input, Axis? axis = null, bool? keepdims = null, TF_DataType dtype = TF_DataType.TF_INT64, string name = null)
+ => math_ops.count_nonzero_v2(input, axis: axis, keepdims: keepdims ?? false, dtype: dtype);
+ public Tensor log(Tensor x, string name = null)
+ => gen_math_ops.log(x, name);
+
+ ///
+ /// Computes the Gauss error function of `x` element-wise.
+ ///
+ ///
+ ///
+ ///
+ public Tensor erf(Tensor x, string name = null)
+ => math_ops.erf(x, name);
+
+ public Tensor multiply(Tensor x, Tensor y, string name = null)
+ => math_ops.multiply(x, y, name: name);
+ public Tensor divide_no_nan(Tensor a, Tensor b, string name = null)
+ => math_ops.div_no_nan(a, b);
+
+ ///
+ /// Computes the Euclidean norm of elements across dimensions of a tensor.
+ ///
+ /// The tensor to reduce. Should have numeric type.
+ /// The dimensions to reduce. If `None` (the default), reduces all dimensions.Must be in the range `[-rank(input_tensor), rank(input_tensor))`
+ /// If true, retains reduced dimensions with length 1.
+ /// A name for the operation (optional).
+ /// The reduced tensor, of the same dtype as the input_tensor.
+ public Tensor reduce_euclidean_norm(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
+ => math_ops.reduce_euclidean_norm(input_tensor, axis: axis, keepdims: keepdims, name);
+
+ public Tensor square(Tensor x, string name = null)
+ => math_ops.square(x, name: name);
+
+ public Tensor sum(Tensor x, Axis? axis = null, string name = null)
+ => math_ops.reduce_sum(x, axis: axis, name: name);
+
+ public Tensor softplus(Tensor features, string name = null)
+ => nn_ops.softplus(features, name: name);
+
+ public Tensor tanh(Tensor x, string name = null)
+ => math_ops.tanh(x, name: name);
+
+ ///
+ /// Finds values and indices of the `k` largest entries for the last dimension.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensors top_k(Tensor input, int k, bool sorted = true, string name = null)
+ => nn_ops.top_kv2(input, k, sorted: sorted, name: name);
+
+ public Tensor in_top_k(Tensor predictions, Tensor targets, int k, string name = "InTopK")
+ => nn_ops.in_top_k(predictions, targets, k, name);
+
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor bincount(Tensor arr, Tensor weights = null,
+ Tensor minlength = null,
+ Tensor maxlength = null,
+ TF_DataType dtype = TF_DataType.TF_INT32,
+ string name = null,
+ Shape axis = null,
+ bool binary_output = false)
+ => math_ops.bincount(arr, weights: weights, minlength: minlength, maxlength: maxlength,
+ dtype: dtype, name: name, axis: axis, binary_output: binary_output);
+
+ public Tensor real(Tensor x, string name = null)
+ => gen_ops.real(x, x.dtype.real_dtype(), name);
+ public Tensor imag(Tensor x, string name = null)
+ => gen_ops.imag(x, x.dtype.real_dtype(), name);
+
+ public Tensor conj(Tensor x, string name = null)
+ => gen_ops.conj(x, name);
+ public Tensor angle(Tensor x, string name = null)
+ => gen_ops.angle(x, x.dtype.real_dtype(), name);
+ }
+
public Tensor abs(Tensor x, string name = null)
=> math_ops.abs(x, name);
@@ -45,8 +140,8 @@ public Tensor asin(Tensor x, string name = null)
public Tensor add(Tensor a, Tensor b, string name = null)
=> gen_math_ops.add(a, b, name: name);
- public Tensor add(Tx a, Ty b, string name = null)
- => gen_math_ops.add(a, b, name: name);
+ public Tensor add(Tx a, Ty b, string name = null)
+ => gen_math_ops.add(ops.convert_to_tensor(a), ops.convert_to_tensor(b), name: name);
///
/// Adds all input tensors element-wise.
@@ -67,10 +162,10 @@ public Tensor atan(Tensor x, string name = null)
=> gen_math_ops.atan(x, name);
public Tensor arg_max(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null)
- => gen_math_ops.arg_max(input, dimension, output_type: output_type, name: name);
+ => gen_math_ops.arg_max(input, ops.convert_to_tensor(dimension), output_type: output_type, name: name);
public Tensor arg_min(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null)
- => gen_math_ops.arg_min(input, dimension, output_type: output_type, name: name);
+ => gen_math_ops.arg_min(input, ops.convert_to_tensor(dimension), output_type: output_type, name: name);
public Tensor is_finite(Tensor input, string name = null)
=> gen_math_ops.is_finite(input, name);
@@ -114,6 +209,9 @@ public Tensor sinh(Tensor x, string name = null)
public Tensor cos(Tensor x, string name = null)
=> gen_math_ops.cos(x, name);
+ public Tensor cos(float x, string name = null)
+ => gen_math_ops.cos(ops.convert_to_tensor(x), name);
+
///
/// Computes hyperbolic cosine of x element-wise.
///
@@ -148,7 +246,7 @@ public Tensor floor(Tensor x, string name = null)
///
///
public Tensor greater(Tx x, Ty y, string name = null)
- => gen_math_ops.greater(x, y, name);
+ => gen_math_ops.greater(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
///
/// Returns the truth value of (x >= y) element-wise.
@@ -160,10 +258,10 @@ public Tensor greater(Tx x, Ty y, string name = null)
///
///
public Tensor greater_equal(Tx x, Ty y, string name = null)
- => gen_math_ops.greater_equal(x, y, name);
+ => gen_math_ops.greater_equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
///
- /// Returns the truth value of (x < y) element-wise.
+ /// Returns the truth value of (x < y) element-wise.
///
///
///
@@ -172,7 +270,7 @@ public Tensor greater_equal(Tx x, Ty y, string name = null)
///
///
public Tensor less(Tx x, Ty y, string name = null)
- => gen_math_ops.less(x, y, name);
+ => gen_math_ops.less(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
///
/// Computes the log of the absolute value of `Gamma(x)` element-wise.
@@ -184,7 +282,7 @@ public Tensor lgamma(Tensor x, string name = null)
=> gen_math_ops.lgamma(x, name: name);
///
- /// Returns the truth value of (x <= y) element-wise.
+ /// Returns the truth value of (x <= y) element-wise.
///
///
///
@@ -193,7 +291,7 @@ public Tensor lgamma(Tensor x, string name = null)
///
///
public Tensor less_equal(Tx x, Ty y, string name = null)
- => gen_math_ops.less_equal(x, y, name);
+ => gen_math_ops.less_equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
///
/// Computes natural logarithm of (1 + x) element-wise.
@@ -204,8 +302,8 @@ public Tensor less_equal(Tx x, Ty y, string name = null)
public Tensor log1p(Tensor x, string name = null)
=> gen_math_ops.log1p(x, name);
- public Tensor logical_and(Tensor x, Tensor y, string name = null)
- => gen_math_ops.logical_and(x, y, name);
+ public Tensor logical_and(T x, T y, string name = null)
+ => gen_math_ops.logical_and(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
public Tensor logical_not(Tensor x, string name = null)
=> gen_math_ops.logical_not(x, name);
@@ -214,7 +312,10 @@ public Tensor logical_or(Tensor x, Tensor y, string name = null)
=> gen_math_ops.logical_or(x, y, name);
public Tensor logical_xor(Tensor x, Tensor y, string name = "LogicalXor")
- => gen_math_ops.logical_xor(x, y, name);
+ {
+ return gen_math_ops.logical_and(gen_math_ops.logical_or(x, y),
+ gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)), name);
+ }
///
/// Clips tensor values to a specified min and max.
@@ -225,8 +326,8 @@ public Tensor logical_xor(Tensor x, Tensor y, string name = "LogicalXor")
///
///
public Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null)
- => gen_math_ops._clip_by_value(t, clip_value_min, clip_value_max);
-
+ => gen_math_ops.clip_by_value(t, clip_value_min, clip_value_max);
+
///
/// Clips tensor values to a specified min and max.
///
@@ -254,17 +355,17 @@ public Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_
/// Any values less than clip_value_min are set to clip_value_min. Any values
/// greater than clip_value_max are set to clip_value_max.
///
- public Tensor clip_by_value (Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = "ClipByValue")
+ public Tensor clip_by_value(Tensor t, T1 clip_value_min, T2 clip_value_max, string name = "ClipByValue")
=> clip_ops.clip_by_value(t, clip_value_min, clip_value_max, name);
-
+
public Tensor sub(Tx a, Ty b, string name = null)
- => gen_math_ops.sub(a, b, name: name);
+ => gen_math_ops.sub(ops.convert_to_tensor(a), ops.convert_to_tensor(b), name: name);
public Tensor divide(Tensor a, Tensor b)
=> a / b;
- public Tensor sqrt(Tensor a, string name = null)
- => gen_math_ops.sqrt(a, name);
+ public Tensor sqrt(Tensor a, string name = null)
+ => math_ops.sqrt(a, name);
public Tensor sign(Tensor a, string name = null)
=> gen_math_ops.sign(a, name);
@@ -286,7 +387,7 @@ public Tensor log(Tensor x, string name = null)
=> gen_math_ops.log(x, name);
public Tensor equal(Tensor x, Tensor y, string name = null)
- => gen_math_ops.equal(x, y, name);
+ => gen_math_ops.equal(x, y, name: name);
///
/// Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
@@ -309,7 +410,7 @@ public Tensor atan2(Tensor y, Tensor x, string name = null)
///
///
public Tensor max(Tx input, Ty axis, bool keep_dims = false, string name = null)
- => gen_math_ops._max(input, axis, keep_dims: keep_dims, name: name);
+ => gen_math_ops.max(ops.convert_to_tensor(input), ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name);
///
/// Computes the minimum of elements across dimensions of a tensor.
@@ -322,7 +423,7 @@ public Tensor max(Tx input, Ty axis, bool keep_dims = false, string name
///
///
public Tensor min(Tx input, Ty axis, bool keep_dims = false, string name = null)
- => gen_math_ops._min(input, axis, keep_dims: keep_dims, name: name);
+ => gen_math_ops.min(ops.convert_to_tensor(input), ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name);
///
/// Returns the max of x and y (i.e. x > y ? x : y) element-wise.
@@ -334,10 +435,10 @@ public Tensor min(Tx input, Ty axis, bool keep_dims = false, string name
///
///
public Tensor maximum(T1 x, T2 y, string name = null)
- => gen_math_ops.maximum(x, y, name: name);
+ => gen_math_ops.maximum(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name);
///
- /// Returns the min of x and y (i.e. x < y ? x : y) element-wise.
+ /// Returns the min of x and y (i.e. x < y ? x : y) element-wise.
///
///
///
@@ -346,7 +447,7 @@ public Tensor maximum(T1 x, T2 y, string name = null)
///
///
public Tensor minimum(T1 x, T2 y, string name = null)
- => gen_math_ops.minimum(x, y, name: name);
+ => gen_math_ops.minimum(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name);
public Tensor multiply(Tensor x, Tensor y, string name = null)
=> gen_math_ops.mul(x, y, name: name);
@@ -360,9 +461,20 @@ public Tensor multiply(Tensor x, Tensor y, string name = null)
///
///
///
- public Tensor multiply(Tx x, Ty y, string name = null)
- => gen_math_ops.mul(x, y, name: name);
-
+ public Tensor multiply(Tx x, Ty y, string name = null)
+ => gen_math_ops.mul(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name);
+ ///
+ /// return scalar product
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor dot_prod(Tx x, Ty y, NDArray axes, string name = null)
+ => math_ops.tensordot(convert_to_tensor(x), convert_to_tensor(y), axes, name: name);
public Tensor negative(Tensor x, string name = null)
=> gen_math_ops.neg(x, name);
@@ -412,9 +524,12 @@ public Tensor floordiv(Tensor x, Tensor y, string name = null)
public static Tensor truediv(Tensor x, Tensor y, string name = null)
=> math_ops.truediv(x, y, name: name);
- public Tensor range(object start, object limit = null, object delta = null, TF_DataType dtype = TF_DataType.DtInvalid, string name = "range")
+ public Tensor range(object start, object limit = null, object delta = null, TF_DataType? dtype = null, string name = "range")
=> math_ops.range(start, limit: limit, delta: delta, dtype: dtype, name: name);
+ public Tensor real(Tensor input, string name = null)
+ => math_ops.real(input, name);
+
///
/// Computes the "logical or" of elements across dimensions of a tensor.
///
@@ -423,12 +538,9 @@ public Tensor range(object start, object limit = null, object delta = null, TF_D
/// If true, retains reduced dimensions with length 1.
///
/// The reduced tensor.
- public Tensor reduce_any(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null)
+ public Tensor reduce_any(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
=> math_ops.reduce_any(input_tensor, axis: axis, keepdims: keepdims, name: name);
- public Tensor reduce_any(Tensor input_tensor, int axis = 0, bool keepdims = false, string name = null)
- => math_ops.reduce_any(input_tensor, axis: new[] { axis }, keepdims: keepdims, name: name);
-
///
/// Computes the "logical and" of elements across dimensions of a tensor.
///
@@ -437,7 +549,7 @@ public Tensor reduce_any(Tensor input_tensor, int axis = 0, bool keepdims = fals
///
///
/// The reduced tensor.
- public Tensor reduce_all(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null)
+ public Tensor reduce_all(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
=> math_ops.reduce_all(input_tensor, axis: axis, keepdims: keepdims, name: name);
///
@@ -448,43 +560,24 @@ public Tensor reduce_all(Tensor input_tensor, int[] axis = null, bool keepdims =
///
///
///
- public Tensor reduce_prod(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null)
+ public Tensor reduce_prod(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
=> math_ops.reduce_prod(input_tensor, axis: axis, keepdims: keepdims, name: name);
- ///
- /// Computes the sum of elements across dimensions of a tensor.
- ///
- ///
- ///
- ///
- ///
- ///
- public Tensor reduce_sum(Tensor[] input_tensors, int? axis = null, bool keepdims = false, string name = null)
- => math_ops.reduce_sum(input_tensors, axis: axis, keepdims: keepdims, name: name);
-
///
/// Computes the sum of elements across dimensions of a tensor.
///
///
///
///
- public Tensor reduce_sum(Tensor input, int? axis = null, int? reduction_indices = null,
+ public Tensor reduce_sum(Tensor input, Axis? axis = null, Axis? reduction_indices = null,
bool keepdims = false, string name = null)
{
- if (!axis.HasValue && reduction_indices.HasValue && !keepdims)
- return math_ops.reduce_sum(input, reduction_indices.Value);
- else if (axis.HasValue && !reduction_indices.HasValue && !keepdims)
- return math_ops.reduce_sum(input, axis.Value);
- else if (axis.HasValue && !reduction_indices.HasValue && keepdims)
- return math_ops.reduce_sum(input, keepdims: keepdims, axis: axis.Value, name: name);
+ if (keepdims)
+ return math_ops.reduce_sum(input, axis: constant_op.constant(axis ?? reduction_indices), keepdims: keepdims, name: name);
else
- return math_ops.reduce_sum(input, keepdims: keepdims, name: name);
+ return math_ops.reduce_sum(input, axis: constant_op.constant(axis ?? reduction_indices));
}
- public Tensor reduce_sum(Tensor input, TensorShape axis, int? reduction_indices = null,
- bool keepdims = false, string name = null)
- => math_ops.reduce_sum(input, axis, keepdims: keepdims, name: name);
-
///
/// Computes the maximum of elements across dimensions of a tensor.
///
@@ -493,40 +586,43 @@ public Tensor reduce_sum(Tensor input, TensorShape axis, int? reduction_indices
///
///
///
- public Tensor reduce_max(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null)
- => math_ops.reduce_max(input_tensor, axis, keepdims, name);
-
- public Tensor reduce_max(Tensor input_tensor, int axis, bool keepdims = false, string name = null)
+ public Tensor reduce_max(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
=> math_ops.reduce_max(input_tensor, axis, keepdims, name);
- public Tensor reduce_min(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null)
+ public Tensor reduce_min(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
=> math_ops.reduce_min(input_tensor, axis, keepdims, name);
+ public Tensor reduce_std(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
+ => math_ops.reduce_std(input_tensor, axis, keepdims, name);
+
+ public Tensor reduce_variance(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
+ => math_ops.reduce_variance(input_tensor, axis, keepdims, name);
+
public Tensor sigmoid(T x, string name = null)
=> math_ops.sigmoid(x, name: name);
public Tensor sum(Tensor input, int axis, bool keep_dims = false, string name = null)
- => gen_math_ops._sum(input, axis, keep_dims: keep_dims, name: name);
+ => gen_math_ops.sum(input, ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name);
- public Tensor reduce_mean(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null, int? reduction_indices = null)
+ public Tensor reduce_mean(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null, int? reduction_indices = null)
=> math_ops.reduce_mean(input_tensor, axis: axis, keepdims: keepdims, name: name, reduction_indices: reduction_indices);
- public Tensor reduce_mean(Tensor[] input_tensors, int? axis = null, bool keepdims = false, string name = null)
- => math_ops.reduce_mean(input_tensors, axis: axis, keepdims: keepdims, name: name);
-
public Tensor round(Tensor x, string name = null)
=> gen_math_ops.round(x, name: name);
- public Tensor cast(Tensor x, TF_DataType dtype = TF_DataType.DtInvalid, string name = null)
+ public Tensor cast(Tensor x, TF_DataType dtype, string name = null)
=> math_ops.cast(x, dtype, name);
public Tensor cumsum(Tensor x, int axis = 0, bool exclusive = false, bool reverse = false, string name = null)
=> math_ops.cumsum(x, axis: axis, exclusive: exclusive, reverse: reverse, name: name);
- public Tensor argmax(Tensor input, int axis = -1, string name = null, int? dimension = null, TF_DataType output_type = TF_DataType.TF_INT64)
- => gen_math_ops.arg_max(input, axis, name: name, output_type: output_type);
-
public Tensor square(Tensor x, string name = null)
=> gen_math_ops.square(x, name: name);
+ public Tensor squared_difference(Tensor x, Tensor y, string name = null)
+ => gen_math_ops.squared_difference(x: x, y: y, name: name);
+ public Tensor complex(Tensor real, Tensor imag, Tensorflow.TF_DataType? dtype = null,
+ string name = null) => gen_ops.complex(real, imag, dtype, name);
+ public Tensor exp(Tensor x,
+ string name = null) => gen_math_ops.exp(x, name);
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs
index c8ce62f9b..112c48628 100644
--- a/src/TensorFlowNET.Core/APIs/tf.nn.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs
@@ -14,6 +14,7 @@ You may obtain a copy of the License at
limitations under the License.
******************************************************************************/
+using System.Xml.Linq;
using Tensorflow.Operations;
using Tensorflow.Operations.Activation;
using static Tensorflow.Binding;
@@ -26,24 +27,11 @@ public partial class tensorflow
public class nn_internal
{
- public Tensor conv2d(Tensor input, RefVariable filter, int[] strides, string padding, bool use_cudnn_on_gpu = true,
- string data_format= "NHWC", int[] dilations= null, string name = null)
+ public Tensor conv2d(Tensor input, Tensor filter, int[] strides, string padding, bool use_cudnn_on_gpu = true,
+ string data_format = "NHWC", int[] dilations = null, string name = null)
{
- var parameters = new Conv2dParams
- {
- Input = input,
- Filter = filter,
- Strides = strides,
- Padding = padding,
- UseCudnnOnGpu = use_cudnn_on_gpu,
- DataFormat = data_format,
- Name = name
- };
-
- if (dilations != null)
- parameters.Dilations = dilations;
-
- return gen_nn_ops.conv2d(parameters);
+ return gen_nn_ops.conv2d(input, filter, strides, padding, use_cudnn_on_gpu,
+ data_format: data_format, dilations: dilations, name: name);
}
public Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = null)
@@ -65,8 +53,7 @@ public Tensor dropout(Tensor x, Tensor keep_prob = null, Tensor noise_shape = nu
Tensor keep = null;
if (keep_prob != null)
keep = 1.0f - keep_prob;
- var rate_tensor = keep;
-
+ var rate_tensor = rate.HasValue ? tf.constant(rate.Value) : keep;
return nn_ops.dropout_v2(x, rate: rate_tensor, noise_shape: noise_shape, seed: seed, name: name);
}
@@ -90,14 +77,14 @@ public Tensor elu(Tensor features, string name = null)
=> gen_nn_ops.elu(features, name: name);
public (Tensor, Tensor) moments(Tensor x,
- int[] axes,
+ Axis axes,
string name = null,
- bool keep_dims = false) => nn_impl.moments(x,
- axes,
- name: name,
+ bool keep_dims = false) => nn_impl.moments(x,
+ axes,
+ name: name,
keep_dims: keep_dims);
- public Tensor embedding_lookup(RefVariable @params,
+ public Tensor embedding_lookup(IVariableV1 @params,
Tensor ids,
string partition_strategy = "mod",
string name = null) => embedding_ops._embedding_lookup_and_transform(@params,
@@ -114,48 +101,78 @@ public Tensor embedding_lookup(Tensor @params,
name: name);
public IActivation relu() => new relu();
+
+
public IActivation swish() => new swish();
public IActivation tanh() => new tanh();
+
+ public IActivation softmax() => new softmax();
public Tensor tanh(Tensor x, string name = null)
- => gen_nn_ops.tanh(x, name);
+ => gen_math_ops.tanh(x, name);
- public Tensor relu(Tensor features, string name = null)
+ public Tensor relu(Tensor features, string name = null)
=> gen_nn_ops.relu(features, name);
+ public Tensor relu6(Tensor features, string name = null)
+ => gen_nn_ops.relu6(features, name);
+
public Tensor[] fused_batch_norm(Tensor x,
- IVariableV1 scale,
- IVariableV1 offset,
+ Tensor scale,
+ Tensor offset,
Tensor mean = null,
Tensor variance = null,
float epsilon = 0.001f,
string data_format = "NHWC",
bool is_training = true,
- string name = null) => nn_impl.fused_batch_norm(x, scale, offset, mean, variance,
+ string name = null,
+ float exponential_avg_factor = 1.0f) => nn_impl.fused_batch_norm(x, scale, offset, mean, variance,
epsilon: epsilon,
data_format: data_format,
is_training: is_training,
- name: name);
-
- public IPoolFunction max_pool_fn => new MaxPoolFunction();
+ name: name,
+ exponential_avg_factor: exponential_avg_factor);
- public Tensor max_pool(Tensor value, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string name = null)
+ ///
+ /// Normalizes a tensor by `mean` and `variance`, and applies (optionally) a`scale` \\(\gamma\\) to it, as well as an `offset` \\(\beta\\).
+ ///
+ /// A floating point tensor.
+ /// A mean `Tensor`.
+ /// A variance `Tensor`.
+ /// An offset `Tensor`, often denoted \\(\beta\\) in equations, or NULL. If present, will be added to the normalized tensor.
+ /// A scale `Tensor`, often denoted \\(\gamma\\) in equations, or NULL. If present, the scale is applied to the normalized tensor.
+ /// A small float number to avoid dividing by 0.
+ /// A name for this operation.
+ /// the normalized, scaled, offset tensor.
+ public Tensor batch_normalization(Tensor x,
+ Tensor mean,
+ Tensor variance,
+ Tensor offset,
+ Tensor scale,
+ float variance_epsilon,
+ string name = null) => nn_impl.batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name);
+
+
+ public Tensor max_pool(Tensor value, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string name = null)
=> nn_ops.max_pool(value, ksize, strides, padding, data_format: data_format, name: name);
public Tensor in_top_k(Tensor predictions, Tensor targets, int k, string name = "InTopK")
=> nn_ops.in_top_k(predictions, targets, k, name);
public Tensor[] top_k(Tensor input, int k = 1, bool sorted = true, string name = null)
- => gen_nn_ops.top_kv2(input, k: k, sorted: sorted, name: name);
+ => gen_nn_ops.top_kv2(input, k: ops.convert_to_tensor(k), sorted: sorted, name: name);
- public Tensor bias_add(Tensor value, RefVariable bias, string data_format = null, string name = null)
+ public Tensor bias_add(Tensor value, IVariableV1 bias, string data_format = null, string name = null)
{
return tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope =>
{
name = scope;
- return gen_nn_ops.bias_add(value, bias, data_format: data_format, name: name);
+ return gen_nn_ops.bias_add(value, ops.convert_to_tensor(bias), data_format: data_format, name: name);
});
}
+ public Tensor l2_loss(Tensor t, string name = null)
+ => nn_ops.l2_loss(t, name: name);
+
///
/// Local Response Normalization.
///
@@ -168,7 +185,7 @@ public Tensor bias_add(Tensor value, RefVariable bias, string data_format = null
///
public Tensor lrn(Tensor input, int depth_radius = 5, int bias = 1,
int alpha = 1, float beta = 0.5f, string name = null)
- => gen_nn_ops.local_response_normalization(input, depth_radius: depth_radius, bias: bias,
+ => gen_nn_ops.lrn(input, depth_radius: depth_radius, bias: bias,
alpha: alpha, beta: beta, name: name);
public Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null)
@@ -182,6 +199,7 @@ public Tensor sigmoid_cross_entropy_with_logits(Tensor labels, Tensor logits, st
public Tensor softmax(Tensor logits, int axis = -1, string name = null)
=> gen_nn_ops.softmax(logits, name);
+
///
/// Computes sparse softmax cross entropy between `logits` and `labels`.
///
diff --git a/src/TensorFlowNET.Core/APIs/tf.numpy.cs b/src/TensorFlowNET.Core/APIs/tf.numpy.cs
new file mode 100644
index 000000000..392ba915f
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.numpy.cs
@@ -0,0 +1,29 @@
+/*****************************************************************************
+ Copyright 2021 Haiping Chen. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.NumPy;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ ///
+ /// NumPy API on TensorFlow
+ /// https://www.tensorflow.org/api_docs/python/tf/experimental/numpy
+ ///
+ public NumPyImpl numpy => new NumPyImpl();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.ops.cs b/src/TensorFlowNET.Core/APIs/tf.ops.cs
index 86e979c4d..ebf35e3f9 100644
--- a/src/TensorFlowNET.Core/APIs/tf.ops.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.ops.cs
@@ -27,25 +27,24 @@ public void add_to_collection(string name, T value)
public void add_to_collections(List names, T value)
=> get_default_graph().add_to_collections(names, value);
- public Tensor assign(Tensor @ref, object value, bool validate_shape = true, bool use_locking = true, string name = null)
- => state_ops.assign(@ref, value, validate_shape, use_locking, name);
-
- public Tensor assign(RefVariable @ref, object value, bool validate_shape = true, bool use_locking = true, string name = null)
- => state_ops.assign(@ref, value, validate_shape, use_locking, name);
+ public (Tensors, Tensor) clip_by_global_norm(Tensor[] t_list, float clip_norm, Tensor use_norm = null, string name = null)
+ => clip_ops.clip_by_global_norm(t_list, clip_norm, use_norm: use_norm, name: name);
- public Tensor assign(ResourceVariable @ref, object value, bool validate_shape = true, bool use_locking = true, string name = null)
+ public Tensor assign(IVariableV1 @ref, object value, bool validate_shape = true, bool use_locking = true, string name = null)
=> state_ops.assign(@ref, value, validate_shape, use_locking, name);
public void device(string device_name)
=> get_default_graph().device(device_name);
- public List get_collection(string key, string scope = "")
+ public List get_collection(string key, string scope = "")
=> get_default_graph().get_collection(key, scope: scope);
///
/// A context manager that lifts ops out of control-flow scopes and function-building graphs.
+ /// When eager execution is enabled, code inside an init_scope block runs with
+ /// eager execution enabled even when tracing a `tf.function`.
///
- public void init_scope()
+ public ops.NameScope init_scope()
=> ops.init_scope();
///
@@ -55,7 +54,7 @@ public void init_scope()
/// The default name to use if the name argument is None.
/// The list of Tensor arguments that are passed to the op function.
/// The scope name.
- public ops.NameScope name_scope(string name, string default_name = "", object values = null)
+ public ops.NameScope name_scope(string name, string default_name = "", object values = null)
=> new ops.NameScope(name, default_name, values);
///
diff --git a/src/TensorFlowNET.Core/APIs/tf.queue.cs b/src/TensorFlowNET.Core/APIs/tf.queue.cs
index 91947e5b6..a4757890e 100644
--- a/src/TensorFlowNET.Core/APIs/tf.queue.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.queue.cs
@@ -14,7 +14,6 @@ You may obtain a copy of the License at
limitations under the License.
******************************************************************************/
-using System;
using Tensorflow.Queues;
namespace Tensorflow
@@ -33,7 +32,7 @@ public partial class tensorflow
///
public PaddingFIFOQueue PaddingFIFOQueue(int capacity,
TF_DataType[] dtypes,
- TensorShape[] shapes,
+ Shape[] shapes,
string[] names = null,
string shared_name = null,
string name = "padding_fifo_queue")
@@ -46,7 +45,7 @@ public PaddingFIFOQueue PaddingFIFOQueue(int capacity,
public PaddingFIFOQueue PaddingFIFOQueue(int capacity,
TF_DataType dtype,
- TensorShape shape,
+ Shape shape,
string shared_name = null,
string name = "padding_fifo_queue")
=> new PaddingFIFOQueue(capacity,
@@ -67,7 +66,7 @@ public PaddingFIFOQueue PaddingFIFOQueue(int capacity,
///
public FIFOQueue FIFOQueue(int capacity,
TF_DataType[] dtypes,
- TensorShape[] shapes = null,
+ Shape[] shapes = null,
string[] names = null,
string shared_name = null,
string name = "fifo_queue")
@@ -80,12 +79,12 @@ public FIFOQueue FIFOQueue(int capacity,
public FIFOQueue FIFOQueue(int capacity,
TF_DataType dtype,
- TensorShape shape = null,
+ Shape shape = null,
string shared_name = null,
string name = "fifo_queue")
=> new FIFOQueue(capacity,
new[] { dtype },
- new[] { shape ?? new TensorShape() },
+ new[] { shape ?? Shape.Null },
shared_name: shared_name,
name: name);
@@ -100,26 +99,26 @@ public FIFOQueue FIFOQueue(int capacity,
///
public PriorityQueue PriorityQueue(int capacity,
TF_DataType dtype,
- TensorShape shape = null,
+ Shape shape = null,
string shared_name = null,
string name = "priority_queue")
=> new PriorityQueue(capacity,
new[] { dtype },
- new[] { shape ?? new TensorShape() },
+ new[] { shape ?? Shape.Null },
shared_name: shared_name,
name: name);
public RandomShuffleQueue RandomShuffleQueue(int capacity,
int min_after_dequeue,
TF_DataType dtype,
- TensorShape shape = null,
+ Shape shape = null,
int? seed = null,
string shared_name = null,
string name = "random_shuffle_queue")
=> new RandomShuffleQueue(capacity,
min_after_dequeue: min_after_dequeue,
new[] { dtype },
- new[] { shape ?? new TensorShape() },
+ new[] { shape ?? Shape.Null },
seed: seed,
shared_name: shared_name,
name: name);
diff --git a/src/TensorFlowNET.Core/APIs/tf.random.cs b/src/TensorFlowNET.Core/APIs/tf.random.cs
index d6c7d93a4..4f4962840 100644
--- a/src/TensorFlowNET.Core/APIs/tf.random.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.random.cs
@@ -32,22 +32,66 @@ public class Random
///
///
///
- public Tensor normal(TensorShape shape,
+ public Tensor normal(Shape shape,
float mean = 0.0f,
float stddev = 1.0f,
TF_DataType dtype = TF_DataType.TF_FLOAT,
int? seed = null,
string name = null) => random_ops.random_normal(shape, mean, stddev, dtype, seed, name);
+
+ public Tensor stateless_normal(Shape shape,
+ float mean = 0.0f,
+ float stddev = 1.0f,
+ TF_DataType dtype = TF_DataType.TF_FLOAT,
+ string name = null) => stateless_random_ops.stateless_random_normal(shape, mean, stddev, dtype, name: name);
+
+ ///
+ /// Outputs random values from a truncated normal distribution.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor truncated_normal(Shape shape,
+ float mean = 0.0f,
+ float stddev = 1.0f,
+ TF_DataType dtype = TF_DataType.TF_FLOAT,
+ int? seed = null,
+ string name = null) => random_ops.truncated_normal(shape, mean, stddev, dtype, seed, name);
+
+ public Tensor categorical(
+ Tensor logits,
+ int num_samples,
+ int? seed = null,
+ string name = null,
+ TF_DataType output_dtype = TF_DataType.DtInvalid) => random_ops.multinomial(logits, num_samples, seed: seed, name: name, output_dtype: output_dtype);
+
+ public Tensor uniform(Shape shape,
+ float minval = 0,
+ float maxval = 1,
+ TF_DataType dtype = TF_DataType.TF_FLOAT,
+ int? seed = null,
+ string name = null)
+ {
+ if (dtype.is_integer())
+ return random_ops.random_uniform_int(shape, (int)minval, (int)maxval, seed, name);
+ else
+ return random_ops.random_uniform(shape, minval, maxval, dtype, seed, name);
+ }
}
- public Tensor random_uniform(TensorShape shape,
+ public Tensor random_uniform(Shape shape,
float minval = 0,
float maxval = 1,
TF_DataType dtype = TF_DataType.TF_FLOAT,
int? seed = null,
- string name = null) => random_ops.random_uniform(shape, minval, maxval, dtype, seed, name);
+ string name = null)
+ => random.uniform(shape, minval: minval, maxval: maxval, dtype: dtype, seed: seed, name: name);
- public Tensor truncated_normal(TensorShape shape,
+ public Tensor truncated_normal(Shape shape,
float mean = 0.0f,
float stddev = 1.0f,
TF_DataType dtype = TF_DataType.TF_FLOAT,
@@ -69,11 +113,16 @@ public Tensor random_shuffle(Tensor value, int? seed = null, string name = null)
=> random_ops.random_shuffle(value, seed: seed, name: name);
public void set_random_seed(int seed)
- => ops.get_default_graph().seed = seed;
+ {
+ if (executing_eagerly())
+ Context.set_global_seed(seed);
+ else
+ ops.get_default_graph().seed = seed;
+ }
public Tensor multinomial(Tensor logits, int num_samples, int? seed = null,
string name = null, TF_DataType output_dtype = TF_DataType.DtInvalid)
- => random_ops.multinomial(logits, num_samples, seed: seed,
+ => random_ops.multinomial(logits, num_samples, seed: seed,
name: name, output_dtype: output_dtype);
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.reduce_logsumexp.cs b/src/TensorFlowNET.Core/APIs/tf.reduce_logsumexp.cs
index 325f06339..41f0ec45d 100644
--- a/src/TensorFlowNET.Core/APIs/tf.reduce_logsumexp.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.reduce_logsumexp.cs
@@ -19,7 +19,7 @@ namespace Tensorflow
public partial class tensorflow
{
public Tensor reduce_logsumexp(Tensor input_tensor,
- int[] axis = null,
+ Axis? axis = null,
bool keepdims = false,
string name = null) => math_ops.reduce_logsumexp(input_tensor, axis, keepdims, name);
diff --git a/src/TensorFlowNET.Core/APIs/tf.reshape.cs b/src/TensorFlowNET.Core/APIs/tf.reshape.cs
index b69247092..102a81323 100644
--- a/src/TensorFlowNET.Core/APIs/tf.reshape.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.reshape.cs
@@ -18,12 +18,19 @@ namespace Tensorflow
{
public partial class tensorflow
{
- public Tensor reshape(T1 tensor,
- T2 shape,
- string name = null) => gen_array_ops.reshape(tensor, shape, name);
+ public Tensor reshape(Tensor tensor,
+ Shape shape,
+ string name = null)
+ => gen_array_ops.reshape(tensor, shape, name);
+
+ public Tensor reshape(Tensor tensor,
+ Tensor shape,
+ string name = null)
+ => gen_array_ops.reshape(tensor, shape, name);
public Tensor reshape(Tensor tensor,
- int[] shape,
- string name = null) => gen_array_ops.reshape(tensor, shape, name);
+ object[] shape,
+ string name = null)
+ => array_ops.reshape(tensor, shape, name);
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.saved_model.cs b/src/TensorFlowNET.Core/APIs/tf.saved_model.cs
new file mode 100644
index 000000000..ef6251ca8
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.saved_model.cs
@@ -0,0 +1,20 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Train;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public SavedModelAPI saved_model { get; } = new SavedModelAPI();
+ }
+
+ public class SavedModelAPI
+ {
+ public Trackable load(string export_dir, LoadOptions? options = null)
+ {
+ return Loader.load(export_dir, options);
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.signal.cs b/src/TensorFlowNET.Core/APIs/tf.signal.cs
new file mode 100644
index 000000000..2471124c5
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.signal.cs
@@ -0,0 +1,40 @@
+/*****************************************************************************
+ Copyright 2023 Konstantin Balashov All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.Operations;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public SignalApi signal { get; } = new SignalApi();
+ public class SignalApi
+ {
+ public Tensor fft(Tensor input, string name = null)
+ => gen_ops.f_f_t(input, name: name);
+ public Tensor ifft(Tensor input, string name = null)
+ => gen_ops.i_f_f_t(input, name: name);
+ public Tensor fft2d(Tensor input, string name = null)
+ => gen_ops.f_f_t2d(input, name: name);
+ public Tensor ifft2d(Tensor input, string name = null)
+ => gen_ops.i_f_f_t2d(input, name: name);
+ public Tensor fft3d(Tensor input, string name = null)
+ => gen_ops.f_f_t3d(input, name: name);
+ public Tensor ifft3d(Tensor input, string name = null)
+ => gen_ops.i_f_f_t3d(input, name: name);
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.sparse.cs b/src/TensorFlowNET.Core/APIs/tf.sparse.cs
index c615a6149..f124f6105 100644
--- a/src/TensorFlowNET.Core/APIs/tf.sparse.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.sparse.cs
@@ -14,17 +14,18 @@ You may obtain a copy of the License at
limitations under the License.
******************************************************************************/
+using System;
using Tensorflow.Framework;
namespace Tensorflow
{
public partial class tensorflow
{
- public SparseTensor SparseTensor(long[,] indices, T[] values, long[] dense_shape)
- => new SparseTensor(indices, values, dense_shape);
+ public SparseTensor SparseTensor(long[,] indices, Array values, long[] dense_shape)
+ => new SparseTensor(indices, values, dense_shape);
- public Tensor sparse_tensor_to_dense(SparseTensor sp_input,
- T default_value = default,
+ public Tensor sparse_tensor_to_dense(SparseTensor sp_input,
+ Array default_value = default,
bool validate_indices = true,
string name = null)
=> gen_sparse_ops.sparse_to_dense(sp_input.indices,
@@ -46,13 +47,13 @@ public Tensor sparse_tensor_to_dense(SparseTensor sp_input,
///
/// Dense `Tensor` of shape `output_shape`. Has the same type as `sparse_values`.
public Tensor sparse_to_dense(Tensor sparse_indices,
- TensorShape output_shape,
+ Shape output_shape,
T sparse_values,
T default_value = default,
bool validate_indices = true,
string name = null)
=> gen_sparse_ops.sparse_to_dense(sparse_indices,
- output_shape,
+ output_shape,
sparse_values,
default_value: default_value,
validate_indices: validate_indices,
diff --git a/src/TensorFlowNET.Core/APIs/tf.state.cs b/src/TensorFlowNET.Core/APIs/tf.state.cs
index c57d03c6c..d86f88b17 100644
--- a/src/TensorFlowNET.Core/APIs/tf.state.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.state.cs
@@ -18,7 +18,7 @@ namespace Tensorflow
{
public partial class tensorflow
{
- public Tensor assign_add(RefVariable @ref, T value,
+ public ITensorOrOperation assign_add(IVariableV1 @ref, T value,
bool use_locking = false, string name = null)
=> state_ops.assign_add(@ref, value, use_locking: use_locking, name: name);
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.strings.cs b/src/TensorFlowNET.Core/APIs/tf.strings.cs
index 38d92803a..ecaf775d0 100644
--- a/src/TensorFlowNET.Core/APIs/tf.strings.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.strings.cs
@@ -14,19 +14,82 @@ You may obtain a copy of the License at
limitations under the License.
******************************************************************************/
-using System.Collections.Generic;
-using Tensorflow.IO;
+using static Tensorflow.Binding;
namespace Tensorflow
{
public partial class tensorflow
{
- public strings_internal strings = new strings_internal();
- public class strings_internal
+ public StringsApi strings { get; } = new StringsApi();
+
+ public class StringsApi
{
+ string_ops ops = new string_ops();
+
+ ///
+ /// Converts all uppercase characters into their respective lowercase replacements.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor lower(Tensor input, string encoding = "", string name = null)
+ => ops.lower(input: input, encoding: encoding, name: name);
+
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor regex_replace(Tensor input, string pattern, string rewrite,
+ bool replace_global = true, string name = null)
+ => ops.regex_replace(input, pattern, rewrite,
+ replace_global: replace_global, name: name);
+
+ ///
+ /// Return substrings from `Tensor` of strings.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
public Tensor substr(Tensor input, int pos, int len,
string name = null, string @uint = "BYTE")
- => string_ops.substr(input, pos, len, name: name, @uint: @uint);
+ => ops.substr(input, pos, len, @uint: @uint, name: name);
+
+ public Tensor substr(string input, int pos, int len,
+ string name = null, string @uint = "BYTE")
+ => ops.substr(input, pos, len, @uint: @uint, name: name);
+
+ ///
+ /// String lengths of `input`.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor string_length(Tensor input, string name = null, string unit = "BYTE")
+ => ops.string_length(input, name: name, unit: unit);
+
+ public Tensor format(string template, Tensor[] inputs, string placeholder = "{}", int summarize = 3, string name = null)
+ => ops.string_format(inputs, template: template, placeholder: placeholder, summarize: summarize, name: name);
+
+ public RaggedTensor split(Tensor input, char sep = ' ', int maxsplit = -1, string name = null)
+ => ops.string_split_v2(input, sep: sep.ToString(), maxsplit : maxsplit, name : name);
+
+ public (RaggedTensor, RaggedTensor) unicode_decode_with_offsets(Tensor input, string input_encoding,
+ string errors = "replace", int replacement_char = 0xFFFD,
+ bool replace_control_characters = false, string name = null)
+ => ops.unicode_decode_with_offsets(input, input_encoding, errors,
+ replacement_char: replacement_char,
+ replace_control_characters: replace_control_characters,
+ name: name);
}
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.summary.cs b/src/TensorFlowNET.Core/APIs/tf.summary.cs
index a2611739c..4d0492b60 100644
--- a/src/TensorFlowNET.Core/APIs/tf.summary.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.summary.cs
@@ -20,7 +20,7 @@ public partial class tensorflow
{
public Summaries.Summary summary = new Summaries.Summary();
- public Tensor scalar(string name, Tensor tensor)
+ public Tensor scalar(string name, Tensor tensor)
=> summary.scalar(name, tensor);
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.tensor.cs b/src/TensorFlowNET.Core/APIs/tf.tensor.cs
index 8ba78f42b..b03168ab3 100644
--- a/src/TensorFlowNET.Core/APIs/tf.tensor.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.tensor.cs
@@ -14,12 +14,14 @@ You may obtain a copy of the License at
limitations under the License.
******************************************************************************/
+using Tensorflow.Operations;
+
namespace Tensorflow
{
public partial class tensorflow
{
- public Tensor convert_to_tensor(object value, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, TF_DataType preferred_dtype = TF_DataType.DtInvalid)
- => ops.convert_to_tensor(value, dtype, name, preferred_dtype);
+ public Tensor convert_to_tensor(object value, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, TF_DataType preferred_dtype = TF_DataType.DtInvalid)
+ => ops.convert_to_tensor(value, dtype, name, preferred_dtype: preferred_dtype);
public Tensor strided_slice(Tensor input, Tensor begin, Tensor end, Tensor strides = null,
int begin_mask = 0,
@@ -44,10 +46,10 @@ public Tensor strided_slice(Tensor input, T[] begin, T[] end, T[] strides = n
int ellipsis_mask = 0,
int new_axis_mask = 0,
int shrink_axis_mask = 0,
- string name = null) => gen_array_ops.strided_slice(input: input,
- begin: begin,
- end: end,
- strides: strides,
+ string name = null) => array_ops.strided_slice(input,
+ begin: ops.convert_to_tensor(begin),
+ end: ops.convert_to_tensor(end),
+ strides: ops.convert_to_tensor(strides),
begin_mask: begin_mask,
end_mask: end_mask,
ellipsis_mask: ellipsis_mask,
@@ -66,11 +68,30 @@ public Tensor strided_slice(Tensor input, T[] begin, T[] end, T[] strides = n
/// A name for the operation (optional)
/// if num_or_size_splits is a scalar returns num_or_size_splits Tensor objects;
/// if num_or_size_splits is a 1-D Tensor returns num_or_size_splits.get_shape[0] Tensor objects resulting from splitting value.
- public Tensor[] split(Tensor value, int num_split, Tensor axis, string name = null) => gen_array_ops.split(
+ public Tensor[] split(Tensor value, int num_split, Axis axis, string name = null)
+ => array_ops.split(
+ value: value,
+ num_or_size_splits: num_split,
+ axis: axis,
+ name: name);
+
+ public Tensor[] split(Tensor value, int[] num_split, Axis axis, string name = null)
+ => array_ops.split(
value: value,
+ num_or_size_splits: num_split,
axis: axis,
- num_split: num_split,
- name: name
- );
+ name: name);
+
+ //public Tensor[] split(Tensor value, int num_split, Axis axis, string name = null)
+ // => array_ops.split(
+ // value: value,
+ // num_or_size_splits: num_split,
+ // axis: axis,
+ // name: name);
+
+ public Tensor ensure_shape(Tensor x, Shape shape, string name = null)
+ {
+ return gen_ops.ensure_shape(x, shape, name);
+ }
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.tile.cs b/src/TensorFlowNET.Core/APIs/tf.tile.cs
index 0995dc275..a3b497e8a 100644
--- a/src/TensorFlowNET.Core/APIs/tf.tile.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.tile.cs
@@ -13,15 +13,22 @@ You may obtain a copy of the License at
See the License for the specific language governing permissions and
limitations under the License.
******************************************************************************/
-
-using NumSharp;
+using static Tensorflow.Binding;
namespace Tensorflow
{
public partial class tensorflow
{
- public Tensor tile(Tensor input,
- T multiples,
- string name = null) => gen_array_ops.tile(input, multiples, name);
+ public Tensor tile(Tensor input, Tensor multiples, string name = null)
+ => gen_array_ops.tile(input, multiples, name);
+
+ public Tensor tile(Tensor input, object[] multiples, string name = null)
+ => array_ops.tile(input, constant_op.constant(shape_utils.from_object_array(multiples).dims), name);
+
+ public Tensor tile(Tensor input, Shape multiples, string name = null)
+ {
+ var multiples_tensor = constant_op.constant(multiples);
+ return gen_array_ops.tile(input, multiples_tensor, name);
+ }
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.train.cs b/src/TensorFlowNET.Core/APIs/tf.train.cs
index ca0ecc32e..cf02ed599 100644
--- a/src/TensorFlowNET.Core/APIs/tf.train.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.train.cs
@@ -15,7 +15,6 @@ limitations under the License.
******************************************************************************/
using System.Collections.Generic;
-using Tensorflow.Keras.Optimizers;
using Tensorflow.Train;
namespace Tensorflow
@@ -26,34 +25,37 @@ public partial class tensorflow
public class train_internal
{
- public RefVariable create_global_step(Graph graph)
+ public IVariableV1 create_global_step(Graph graph)
=> TrainingUtil.create_global_step(graph);
- public RefVariable get_global_step(Graph graph)
+ public IVariableV1 get_global_step(Graph graph)
=> TrainingUtil.get_global_step(graph);
- public Optimizer GradientDescentOptimizer(float learning_rate)
+ public Optimizer GradientDescentOptimizer(float learning_rate)
=> new GradientDescentOptimizer(learning_rate);
public Optimizer GradientDescentOptimizer(Tensor learning_rate)
=> new GradientDescentOptimizer(learning_rate);
- public Optimizer AdamOptimizer(float learning_rate, string name = "Adam")
- => new AdamOptimizer(learning_rate, name: name);
+ public Optimizer AdamOptimizer(float learning_rate, float epsilon = 1e-8f, string name = "Adam")
+ => new AdamOptimizer(learning_rate, epsilon: epsilon, name: name);
public Optimizer AdamOptimizer(float learning_rate, TF_DataType dtype, string name = "Adam")
=> new AdamOptimizer(learning_rate, name: name, dtype: dtype);
+ public Optimizer AdamOptimizer(IVariableV1 learning_rate, string name = "Adam")
+ => new AdamOptimizer(learning_rate.AsTensor(), name: name);
+
public Optimizer AdamOptimizer(Tensor learning_rate, string name = "Adam")
=> new AdamOptimizer(learning_rate, name: name);
public ExponentialMovingAverage ExponentialMovingAverage(float decay)
=> new ExponentialMovingAverage(decay);
- public Saver Saver(IVariableV1[] var_list = null, int max_to_keep = 5)
+ public Saver Saver(IVariableV1[] var_list = null, int max_to_keep = 5)
=> new Saver(var_list: var_list, max_to_keep: max_to_keep);
- public string write_graph(Graph graph, string logdir, string name, bool as_text = true)
+ public string write_graph(Graph graph, string logdir, string name, bool as_text = true)
=> graph_io.write_graph(graph, logdir, name, as_text);
public Graph load_graph(string freeze_graph_pb)
@@ -84,7 +86,7 @@ public string latest_checkpoint(string checkpoint_dir, string latest_filename =
public CheckpointState get_checkpoint_state(string checkpoint_dir, string latest_filename = null)
=> checkpoint_management.get_checkpoint_state(checkpoint_dir, latest_filename: latest_filename);
- public Tensor polynomial_decay(float learning_rate,
+ /*public Tensor polynomial_decay(float learning_rate,
RefVariable global_step,
float decay_steps,
float end_learning_rate = 0.0001f,
@@ -102,7 +104,7 @@ public Tensor polynomial_decay(float learning_rate,
var decayed_lr = decayed.__call__(global_step);
return decayed_lr;
- }
+ }*/
}
}
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.variable.cs b/src/TensorFlowNET.Core/APIs/tf.variable.cs
index 5ebc305ba..9ce864bd8 100644
--- a/src/TensorFlowNET.Core/APIs/tf.variable.cs
+++ b/src/TensorFlowNET.Core/APIs/tf.variable.cs
@@ -37,10 +37,7 @@ public Operation variables_initializer(IVariableV1[] var_list, string name = "in
=> variables.variables_initializer(var_list, name: name);
public Operation global_variables_initializer()
- {
- var g = variables.global_variables();
- return variables.variables_initializer(g.ToArray());
- }
+ => tf.compat.v1.global_variables_initializer();
///
/// Returns all variables created with `trainable=True`.
@@ -50,30 +47,6 @@ public Operation global_variables_initializer()
public IVariableV1[] trainable_variables(string scope = null)
=> (variables.trainable_variables() as List).ToArray();
- public RefVariable get_variable(string name,
- TensorShape shape = null,
- TF_DataType dtype = TF_DataType.DtInvalid,
- object initializer = null, // IInitializer or Tensor
- bool? trainable = null,
- List collections = null,
- bool? use_resource = null,
- bool validate_shape = true,
- VariableSynchronization synchronization = VariableSynchronization.Auto,
- VariableAggregation aggregation = VariableAggregation.None)
- {
- var scope = Tensorflow.variable_scope.get_variable_scope();
- var store = Tensorflow.variable_scope._get_default_variable_store();
- return scope.get_variable(store,
- name,
- shape: shape,
- dtype: dtype,
- use_resource: use_resource,
- validate_shape: validate_shape,
- initializer: initializer,
- trainable: trainable,
- collections: collections);
- }
-
public VariableScope get_variable_scope()
=> Tensorflow.variable_scope.get_variable_scope();
}
diff --git a/src/TensorFlowNET.Core/Assembly/Properties.cs b/src/TensorFlowNET.Core/Assembly/Properties.cs
index 28aee65e2..290a72df0 100644
--- a/src/TensorFlowNET.Core/Assembly/Properties.cs
+++ b/src/TensorFlowNET.Core/Assembly/Properties.cs
@@ -1,4 +1,4 @@
using System.Runtime.CompilerServices;
#if DEBUG
-[assembly: InternalsVisibleTo("TensorFlowNET.UnitTest, PublicKey=00240000048000009400000006020000002400005253413100040000010001004b86c4cb78549b34bab61a3b1800e23bfeb5b3ec390074041536a7e3cbd97f5f04cf0f857155a8928eaa29ebfd11cfbbad3ba70efea7bda3226c6a8d370a4cd303f714486b6ebc225985a638471e6ef571cc92a4613c00b8fa65d61ccee0cbe5f36330c9a01f4183559f1bef24cc2917c6d913e3a541333a1d05d9bed22b38cb")]
+[assembly: InternalsVisibleTo("Tensorflow.UnitTest, PublicKey=00240000048000009400000006020000002400005253413100040000010001004b86c4cb78549b34bab61a3b1800e23bfeb5b3ec390074041536a7e3cbd97f5f04cf0f857155a8928eaa29ebfd11cfbbad3ba70efea7bda3226c6a8d370a4cd303f714486b6ebc225985a638471e6ef571cc92a4613c00b8fa65d61ccee0cbe5f36330c9a01f4183559f1bef24cc2917c6d913e3a541333a1d05d9bed22b38cb")]
#endif
diff --git a/src/TensorFlowNET.Core/Attributes/c_api.ops.cs b/src/TensorFlowNET.Core/Attributes/c_api.ops.cs
index 77877a436..ba6f653a1 100644
--- a/src/TensorFlowNET.Core/Attributes/c_api.ops.cs
+++ b/src/TensorFlowNET.Core/Attributes/c_api.ops.cs
@@ -32,7 +32,7 @@ public partial class c_api
/// TF_Status*
///
[DllImport(TensorFlowLibName)]
- public static extern TF_AttrMetadata TF_OperationGetAttrMetadata(IntPtr oper, string attr_name, IntPtr status);
+ public static extern TF_AttrMetadata TF_OperationGetAttrMetadata(IntPtr oper, string attr_name, SafeStatusHandle status);
///
/// Fills in `value` with the value of the attribute `attr_name`. `value` must
@@ -46,8 +46,8 @@ public partial class c_api
/// size_t
/// TF_Status*
[DllImport(TensorFlowLibName)]
- public static extern void TF_OperationGetAttrString(IntPtr oper, string attr_name, IntPtr value, uint max_length, IntPtr status);
-
+ public static extern void TF_OperationGetAttrString(IntPtr oper, string attr_name, IntPtr value, uint max_length, SafeStatusHandle status);
+
///
/// Sets `output_attr_value` to the binary-serialized AttrValue proto
/// representation of the value of the `attr_name` attr of `oper`.
@@ -55,13 +55,28 @@ public partial class c_api
///
///
[DllImport(TensorFlowLibName)]
- public static extern int TF_OperationGetAttrValueProto(IntPtr oper, string attr_name, IntPtr output_attr_value, IntPtr status);
+ public static extern int TF_OperationGetAttrValueProto(IntPtr oper, string attr_name, SafeBufferHandle output_attr_value, SafeStatusHandle status);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TF_OperationGetAttrType(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TF_OperationGetAttrInt(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TF_OperationGetAttrFloat(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TF_OperationGetAttrBool(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TF_OperationGetAttrShape(IntPtr oper, string attr_name, long[] value, int num_dims, SafeStatusHandle status);
[DllImport(TensorFlowLibName)]
public static extern void TF_SetAttrBool(IntPtr desc, string attr_name, bool value);
[DllImport(TensorFlowLibName)]
- public static extern void TF_SetAttrValueProto(IntPtr desc, string attr_name, IntPtr proto, uint proto_len, IntPtr status);
+ public static extern void TF_SetAttrValueProto(IntPtr desc, string attr_name, byte[] proto, ulong proto_len, SafeStatusHandle status);
///
/// Set `num_dims` to -1 to represent "unknown rank".
@@ -99,7 +114,7 @@ public partial class c_api
public static extern void TF_SetAttrStringList(IntPtr desc, string attr_name, IntPtr[] values, uint[] lengths, int num_values);
[DllImport(TensorFlowLibName)]
- public static extern void TF_SetAttrTensor(IntPtr desc, string attr_name, IntPtr value, IntPtr status);
+ public static extern void TF_SetAttrTensor(IntPtr desc, string attr_name, SafeTensorHandle value, SafeStatusHandle status);
[DllImport(TensorFlowLibName)]
public static extern void TF_SetAttrType(IntPtr desc, string attr_name, TF_DataType value);
diff --git a/src/TensorFlowNET.Core/Binding.FuncTools.cs b/src/TensorFlowNET.Core/Binding.FuncTools.cs
index 8705cf447..42a7b4ef9 100644
--- a/src/TensorFlowNET.Core/Binding.FuncTools.cs
+++ b/src/TensorFlowNET.Core/Binding.FuncTools.cs
@@ -1,8 +1,4 @@
using System;
-using System.Collections.Generic;
-using System.Linq;
-using System.Text;
-using System.Threading.Tasks;
namespace Tensorflow
{
diff --git a/src/TensorFlowNET.Core/Binding.Util.cs b/src/TensorFlowNET.Core/Binding.Util.cs
index 809dde46f..99ed5c1f3 100644
--- a/src/TensorFlowNET.Core/Binding.Util.cs
+++ b/src/TensorFlowNET.Core/Binding.Util.cs
@@ -14,14 +14,15 @@ You may obtain a copy of the License at
limitations under the License.
******************************************************************************/
-using NumSharp;
+using Tensorflow.NumPy;
using System;
using System.Collections;
using System.Collections.Generic;
using System.ComponentModel;
using System.Diagnostics;
+using System.IO;
using System.Linq;
-using NumSharp.Utilities;
+using Tensorflow.Operations;
namespace Tensorflow
{
@@ -31,15 +32,47 @@ namespace Tensorflow
public static partial class Binding
{
public static T2 get(this Dictionary dict, T1 key)
- => key == null ?
- default(T2) :
- (dict.ContainsKey(key) ? dict[key] : default(T2));
+ => key == null ?
+ default :
+ (dict.ContainsKey(key) ? dict[key] : default);
+
+ public static void Update(this IList list, T element)
+ {
+ var index = list.IndexOf(element);
+ if (index < 0)
+ list.Add(element);
+ else
+ {
+ list[index] = element;
+ }
+ }
+
+ public static void difference_update(this IList list, IList list2)
+ {
+ foreach(var el in list2)
+ {
+ if (list.Contains(el))
+ list.Remove(el);
+ }
+ }
public static void add(this IList list, T element)
=> list.Add(element);
+ public static void add(this IList list, IEnumerable elements)
+ {
+ foreach (var ele in elements)
+ list.Add(ele);
+ }
+
public static void append(this IList list, T element)
- => list.Add(element);
+ => list.Insert(list.Count, element);
+
+ public static void append(this IList list, IList elements)
+ {
+ for (int i = 0; i < elements.Count; i++)
+ list.Insert(list.Count, elements[i]);
+ }
public static T[] concat(this IList list1, IList list2)
{
@@ -57,71 +90,88 @@ private static string _tostring(object obj)
switch (obj)
{
case NDArray nd:
- return nd.ToString(false);
- case Array arr:
- if (arr.Rank!=1 || arr.GetType().GetElementType()?.IsArray == true)
+ return nd.ToString();
+ /*case Array arr:
+ if (arr.Rank != 1 || arr.GetType().GetElementType()?.IsArray == true)
arr = Arrays.Flatten(arr);
var objs = toObjectArray(arr);
- return $"[{string.Join(", ", objs.Select(_tostring))}]";
+ return $"[{string.Join(", ", objs.Select(_tostring))}]";*/
default:
return obj?.ToString() ?? "null";
}
+ }
+
+ private static TextWriter _writer = Console.Out;
- object[] toObjectArray(Array arr)
+ public static TextWriter tf_output_redirect {
+ set
{
- var len = arr.LongLength;
- var ret = new object[len];
- for (long i = 0; i < len; i++)
+ if(_writer != null)
{
- ret[i] = arr.GetValue(i);
+ _writer.Flush();
+ if (_writer is StringWriter sw)
+ sw.GetStringBuilder().Clear();
}
- return ret;
+ _writer = value;
}
+ get => _writer ?? Console.Out;
}
public static void print(object obj)
{
- Console.WriteLine(_tostring(obj));
+ tf_output_redirect.WriteLine(_tostring(obj));
}
public static void print(string format, params object[] objects)
{
if (!format.Contains("{}"))
{
- Console.WriteLine(format + " " + string.Join(" ", objects.Select(x => x.ToString())));
+ tf_output_redirect.WriteLine(format + " " + string.Join(" ", objects.Select(x => x.ToString())));
return;
}
- foreach(var obj in objects)
+ foreach (var obj in objects)
{
}
- Console.WriteLine(format);
+ tf_output_redirect.WriteLine(format);
}
public static int len(object a)
{
switch (a)
{
+ case Tensor tensor:
+ return (int)tensor.shape[0];
+ case Tensors arr:
+ return arr.Length;
case Array arr:
return arr.Length;
case IList arr:
return arr.Count;
case ICollection arr:
return arr.Count;
- case NDArray ndArray:
- return ndArray.ndim == 0 ? 1 : ndArray.shape[0];
case IEnumerable enumerable:
return enumerable.OfType
///
[DllImport(TensorFlowLibName)]
- public static extern IntPtr TF_NewBuffer();
+ public static extern SafeBufferHandle TF_NewBuffer();
[DllImport(TensorFlowLibName)]
- public static extern IntPtr TF_GetBuffer(TF_Buffer buffer);
+ public static extern TF_Buffer TF_GetBuffer(SafeBufferHandle buffer);
///
/// Makes a copy of the input and sets an appropriate deallocator. Useful for
@@ -42,6 +42,6 @@ public partial class c_api
/// size_t
///
[DllImport(TensorFlowLibName)]
- public static extern IntPtr TF_NewBufferFromString(IntPtr proto, ulong proto_len);
+ public static extern SafeBufferHandle TF_NewBufferFromString(IntPtr proto, ulong proto_len);
}
}
diff --git a/src/TensorFlowNET.Core/Checkpoint/CheckPointUtils.cs b/src/TensorFlowNET.Core/Checkpoint/CheckPointUtils.cs
new file mode 100644
index 000000000..071b41875
--- /dev/null
+++ b/src/TensorFlowNET.Core/Checkpoint/CheckPointUtils.cs
@@ -0,0 +1,171 @@
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.IO;
+using System.Linq;
+using Tensorflow.Functions;
+using Tensorflow.Train;
+using Tensorflow.Training;
+using pbc = global::Google.Protobuf.Collections;
+
+namespace Tensorflow.Checkpoint;
+
+public static class CheckPointUtils
+{
+ private static string _ESCAPE_CHAR = ".";
+ public static (IList, IDictionary>, IDictionary,
+ IDictionary>,
+ IDictionary) objects_ids_and_slot_variables_and_paths(ObjectGraphView graph_view)
+ {
+ var (trackable_objects, node_paths) = graph_view.breadth_first_traversal();
+ Dictionary object_names = new();
+ foreach (var pair in node_paths)
+ {
+ object_names[pair.Key] = TrackableUtils.object_path_to_string(pair.Value);
+ }
+
+ Dictionary node_ids = new();
+ for (int i = 0; i < trackable_objects.Count; i++)
+ {
+ node_ids[trackable_objects[i]] = i;
+ }
+
+ var slot_variables = serialize_slot_variables(trackable_objects, node_ids, object_names);
+ return (trackable_objects, node_paths, node_ids, slot_variables, object_names);
+ }
+
+ public static
+ IDictionary>
+ serialize_slot_variables(IEnumerable trackable_objects,
+ IDictionary node_ids, IDictionary object_names)
+ {
+ var non_slot_objects = trackable_objects.ToList();
+ Dictionary>
+ slot_variables = new();
+ foreach (var trackable in non_slot_objects)
+ {
+ if (trackable is not Optimizer)
+ {
+ continue;
+ }
+
+ var optim = (Optimizer)trackable;
+ var slot_names = optim.get_slot_names();
+ foreach (var slot_name in slot_names)
+ {
+ for (int original_variable_node_id = 0;
+ original_variable_node_id < non_slot_objects.Count;
+ original_variable_node_id++)
+ {
+ var original_variable = non_slot_objects[original_variable_node_id];
+ IVariableV1 slot_variable;
+ if (original_variable is not IVariableV1 original_var)
+ {
+ continue; // non-variable nodes own no slots; old code null-assigned then cast anyway, throwing InvalidCastException
+ }
+ slot_variable = optim.get_slot(original_var, slot_name);
+ if(slot_variable is null) continue;
+
+ // There're some problems about the inherits of `Variable` and `Trackable`.
+ throw new NotImplementedException();
+ }
+ }
+ }
+
+ return slot_variables;
+ }
+
+ public static Trackable get_mapped_trackable(Trackable trackable, IDictionary? object_map)
+ {
+ if (object_map is null || !object_map.TryGetValue(trackable, out var possible_res))
+ {
+ return trackable;
+ }
+ else
+ {
+ return possible_res;
+ }
+ }
+
+ public static string get_full_name(Trackable variable)
+ {
+ // TODO: This state is not correct, the whole framework need to be updated in the future.
+ if (!(variable is IVariableV1 || resource_variable_ops.is_resource_variable(variable)))
+ {
+ return "";
+ }
+ // skip the check of attribute `_save_slice_info` .
+
+ // TODO: Need to be revised!!!
+ Debug.Assert(variable is BaseResourceVariable);
+ return ((BaseResourceVariable)variable).Name;
+ }
+
+ public static void add_checkpoint_values_check(TrackableObjectGraph object_graph_proto)
+ {
+ HashSet checkpointed_trackables = new();
+ Dictionary> parents = new();
+ for (int i = 0; i < object_graph_proto.Nodes.Count; i++)
+ {
+ var object_proto = object_graph_proto.Nodes[i];
+ // skip the process of registered saver.
+ if (object_proto.Attributes is not null && object_proto.Attributes.Count > 0 ||
+ object_proto.SlotVariables is not null && object_proto.SlotVariables.Count > 0)
+ {
+ checkpointed_trackables.Add(i);
+ }
+
+ foreach (var child_proto in object_proto.Children)
+ {
+ var child = child_proto.NodeId;
+ if (!parents.ContainsKey(child))
+ {
+ parents[child] = new HashSet();
+ }
+
+ parents[child].Add(i);
+ }
+ }
+
+ Queue to_visit = new(checkpointed_trackables.AsEnumerable());
+ while (to_visit.Count > 0)
+ {
+ var trackable = to_visit.Dequeue();
+ if (!parents.ContainsKey(trackable)) continue;
+ var current_parents = parents[trackable];
+ foreach (var parent in current_parents)
+ {
+ checkpointed_trackables.Add(parent);
+ if (parents.ContainsKey(parent))
+ {
+ to_visit.Enqueue(parent);
+ }
+ }
+ parents.Remove(trackable);
+ }
+
+ // TODO: Complete it after supporting checkpoint.
+ // for (int i = 0; i < object_graph_proto.Nodes.Count; i++)
+ // {
+ // object_graph_proto.Nodes[i].has_checkpoint_values.value = checkpointed_trackables.Contains(i);
+ // }
+ }
+
+ ///
+ /// Traverse the object graph and list all accessible objects.
+ ///
+ ///
+ public static IList list_objects(ObjectGraphView graph_view)
+ {
+ return objects_ids_and_slot_variables_and_paths(graph_view).Item1;
+ }
+
+ internal static IEnumerable _objects_with_attributes(IEnumerable full_list)
+ {
+ return full_list.Where(x =>
+ {
+ var saveables = x.gather_saveables_for_checkpoint();
+ return saveables is not null && saveables.Count > 0;
+ });
+ }
+}
diff --git a/src/TensorFlowNET.Core/Checkpoint/CheckpointOptions.cs b/src/TensorFlowNET.Core/Checkpoint/CheckpointOptions.cs
new file mode 100644
index 000000000..75b392af8
--- /dev/null
+++ b/src/TensorFlowNET.Core/Checkpoint/CheckpointOptions.cs
@@ -0,0 +1,5 @@
+namespace Tensorflow.Checkpoint;
+
+public record class CheckpointOptions(
+ string? experimental_io_device = null,
+ bool experimental_enable_async_checkpoint = false);
diff --git a/src/TensorFlowNET.Core/Checkpoint/CheckpointReader.cs b/src/TensorFlowNET.Core/Checkpoint/CheckpointReader.cs
new file mode 100644
index 000000000..a1dba371c
--- /dev/null
+++ b/src/TensorFlowNET.Core/Checkpoint/CheckpointReader.cs
@@ -0,0 +1,69 @@
+namespace Tensorflow.Checkpoint;
+
+public class CheckpointReader
+{
+ private SafeCheckpointReaderHandle _handle;
+ public Dictionary VariableToDataTypeMap { get; set; }
+ public Dictionary VariableToShapeMap { get; set; }
+
+ public CheckpointReader(string filename)
+ {
+ Status status = new Status();
+ VariableToDataTypeMap = new Dictionary();
+ VariableToShapeMap = new Dictionary();
+ _handle = c_api.TF_NewCheckpointReader(filename, status);
+ status.Check(true);
+ ReadAllShapeAndType();
+ }
+
+ public int HasTensor(string name)
+ => c_api.TF_CheckpointReaderHasTensor(_handle, name);
+
+ ///
+ /// Get the variable name.
+ ///
+ ///
+ ///
+ public string GetVariable(int index)
+ => c_api.StringPiece(c_api.TF_CheckpointReaderGetVariable(_handle, index));
+
+ public int Size()
+ => c_api.TF_CheckpointReaderSize(_handle);
+
+ public TF_DataType GetVariableDataType(string name)
+ => c_api.TF_CheckpointReaderGetVariableDataType(_handle, name);
+
+ public Shape GetVariableShape(string name)
+ {
+ int num_dims = GetVariableNumDims(name);
+ long[] dims = new long[num_dims];
+ Status status = new Status();
+ c_api.TF_CheckpointReaderGetVariableShape(_handle, name, dims, num_dims, status);
+ status.Check(true);
+ return new Shape(dims);
+ }
+
+ public int GetVariableNumDims(string name)
+ => c_api.TF_CheckpointReaderGetVariableNumDims(_handle, name);
+
+ public unsafe Tensor GetTensor(string name, TF_DataType dtype = TF_DataType.DtInvalid)
+ {
+ Status status = new Status();
+ var tensor = c_api.TF_CheckpointReaderGetTensor(_handle, name, status);
+ status.Check(true);
+ return new Tensor(tensor);
+ }
+
+ private void ReadAllShapeAndType()
+ {
+ int size = Size();
+ for(int i = 0; i < size; i++)
+ {
+ var name = GetVariable(i);
+ var shape = GetVariableShape(name);
+ var dtype = GetVariableDataType(name);
+ VariableToDataTypeMap[name] = dtype;
+ VariableToShapeMap[name] = shape;
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Checkpoint/ObjectGraphView.cs b/src/TensorFlowNET.Core/Checkpoint/ObjectGraphView.cs
new file mode 100644
index 000000000..f435dd88b
--- /dev/null
+++ b/src/TensorFlowNET.Core/Checkpoint/ObjectGraphView.cs
@@ -0,0 +1,64 @@
+using System;
+using System.Collections.Generic;
+using System.Linq;
+using Serilog.Debugging;
+using Tensorflow.Keras.Saving.SavedModel;
+using Tensorflow.Train;
+
+namespace Tensorflow.Checkpoint;
+
+public class ObjectGraphView: TrackableView, ICloneable
+{
+ protected IEnumerable? _attached_dependencies;
+ // TODO: attached_dependencies
+ public ObjectGraphView(Trackable root, IEnumerable? attached_dependencies = null): base(root)
+ {
+ _attached_dependencies = attached_dependencies;
+ }
+
+ public object Clone()
+ {
+ // TODO: Implement real deep copy corresponding to tensorflow/python/checkpoint/graph_view.ObjectGraphView.__deepcopy__
+ return new ObjectGraphView(Root, _attached_dependencies);
+ }
+
+ public virtual List list_children(Trackable obj, SaveType save_type = SaveType.CHECKPOINT, IDictionary>? serialization_cache = null)
+ {
+ List res = base.children(obj, save_type, serialization_cache)
+ .Select(x => new TrackableReference(x.Key, x.Value)).ToList();
+ // Check the reference, not value.
+ if (obj == Root && _attached_dependencies is not null)
+ {
+ res.AddRange(_attached_dependencies);
+ }
+
+ return res;
+ }
+
+ public override IDictionary children(Trackable obj, SaveType save_type = SaveType.CHECKPOINT, IDictionary>? serialization_cache = null)
+ {
+ return list_children(obj, save_type, serialization_cache).ToDictionary(x => x.Name, x => x.Refer);
+ }
+
+ public IEnumerable? AttachedDependencies
+ {
+ get => _attached_dependencies;
+ }
+
+ public virtual (IList, IDictionary>) breadth_first_traversal()
+ {
+ return base._descendants_with_paths();
+ }
+
+ // TODO: complete the implementation
+ public void serialize_object_graph(object? saveables_cache = null)
+ {
+ throw new NotImplementedException();
+ }
+
+ // TODO: complete the implementation
+ public void frozen_saveable_objects(object? object_map = null, object? to_graph = null, object call_with_mapped_captures = null)
+ {
+ throw new NotImplementedException();
+ }
+}
diff --git a/src/TensorFlowNET.Core/Checkpoint/SafeCheckpointReaderHandle.cs b/src/TensorFlowNET.Core/Checkpoint/SafeCheckpointReaderHandle.cs
new file mode 100644
index 000000000..674e83512
--- /dev/null
+++ b/src/TensorFlowNET.Core/Checkpoint/SafeCheckpointReaderHandle.cs
@@ -0,0 +1,21 @@
+using Tensorflow.Util;
+
+namespace Tensorflow.Checkpoint;
+
+public sealed class SafeCheckpointReaderHandle : SafeTensorflowHandle
+{
+ private SafeCheckpointReaderHandle() : base ()
+ {
+ }
+
+ public SafeCheckpointReaderHandle(IntPtr handle) : base(handle)
+ {
+ }
+
+ protected override bool ReleaseHandle()
+ {
+ c_api.TF_DeleteCheckpointReader(handle);
+ SetHandle(IntPtr.Zero);
+ return true;
+ }
+}
diff --git a/src/TensorFlowNET.Core/Checkpoint/SaveUtil.cs b/src/TensorFlowNET.Core/Checkpoint/SaveUtil.cs
new file mode 100644
index 000000000..7a5da7e3a
--- /dev/null
+++ b/src/TensorFlowNET.Core/Checkpoint/SaveUtil.cs
@@ -0,0 +1,261 @@
+using OneOf;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Linq;
+using System.Text;
+using Tensorflow.Train;
+using Tensorflow.Training;
+using Tensorflow.Common.Extensions;
+using pbc = global::Google.Protobuf.Collections;
+
+namespace Tensorflow.Checkpoint
+{
+ internal record class TrackableData(
+ // A trackable in the root Trackable object graph.
+ Trackable trackable,
+ // The index at which the Trackable appears in TrackableObjectGraph.nodes.
+ int node_id,
+ // The BFS-generated path from the root object / used to generate readable checkpoint keys.
+ string object_name,
+ // A list of ObjectReference for each child connected to this Trackable.
+ pbc::RepeatedField children_proto,
+ // A list of SlotVariableReference to save to the object (only valid for Optimizer objects).
+ pbc::RepeatedField slot_variable_proto,
+ // The object to save to checkpoint. Usually this is the same as `trackable`,
+ // but can differ when the the caller wants to specify a different object to
+ // save. For example, when saving checkpoints asynchronously, variables are
+ // copied to the CPU. `object_to_save` is set as the copied variable.
+ Trackable object_to_save
+ );
+ public static class SaveUtil
+ {
+ public static (IDictionary>>>, IDictionary, IDictionary>, TrackableObjectGraph)
+ serialize_graph_view(ObjectGraphView graph_view, IDictionary? object_map = null, bool call_with_mapped_captures = false, object? cache = null)
+ {
+ var (trackable_data, node_ids) = gather_trackable_data(graph_view, object_map);
+ var (tensor_trackables, pystate_trackables, registered_trackables) = split_trackables(trackable_data);
+
+ var object_graph_proto = fill_object_graph_proto(trackable_data);
+
+ var serialized_tensors = get_and_write_tensors_to_serialize(tensor_trackables, node_ids, call_with_mapped_captures, cache, object_graph_proto);
+ var registered_savers = get_and_write_registered_savers(registered_trackables, object_graph_proto);
+
+ Dictionary feed_additions;
+ if(cache is null)
+ {
+ feed_additions = null;
+ serialized_tensors = serialized_tensors.Concat(get_and_write_tensors_to_serialize(pystate_trackables, node_ids, call_with_mapped_captures,
+ cache, object_graph_proto)).ToDictionary(x => x.Key, x => x.Value);
+ }
+ else
+ {
+ feed_additions = null;
+ // TODO: deal with cache.
+ throw new NotImplementedException(); // was NotFiniteNumberException, an unrelated arithmetic exception type
+ }
+
+ CheckPointUtils.add_checkpoint_values_check(object_graph_proto);
+
+ return (serialized_tensors, feed_additions, registered_savers, object_graph_proto);
+ }
+
+ private static (IList, IDictionary) gather_trackable_data(ObjectGraphView graph_view, IDictionary? object_map)
+ {
+ var (trackable_objects, node_paths) = graph_view.breadth_first_traversal();
+ Dictionary object_names = new();
+ foreach(var pair in node_paths)
+ {
+ object_names[pair.Key] = TrackableUtils.object_path_to_string(pair.Value);
+ }
+ Dictionary node_ids = new();
+ for(int i = 0; i < trackable_objects.Count; i++)
+ {
+ node_ids[trackable_objects[i]] = i;
+ }
+ var slot_variables = CheckPointUtils.serialize_slot_variables(trackable_objects, node_ids, object_names);
+ List trackable_data = new();
+ foreach(var trackable in trackable_objects)
+ {
+ pbc::RepeatedField children_proto = new();
+ foreach(var child in graph_view.list_children(trackable))
+ {
+ children_proto.Add(new TrackableObjectGraph.Types.TrackableObject.Types.ObjectReference()
+ {
+ NodeId = node_ids[child.Refer],
+ LocalName = child.Name
+ });
+ }
+ slot_variables.TryGetValue(trackable, out var slot_variable);
+ trackable_data.Add(new TrackableData(
+ trackable: trackable,
+ node_id: node_ids[trackable],
+ object_name: object_names[trackable],
+ children_proto: children_proto,
+ slot_variable_proto: slot_variable??new pbc.RepeatedField(),
+ object_to_save: CheckPointUtils.get_mapped_trackable(trackable, object_map)
+ ));
+ }
+ return (trackable_data, node_ids);
+ }
+
+ private static TrackableObjectGraph fill_object_graph_proto(IList trackable_data)
+ {
+ TrackableObjectGraph object_graph_proto = new();
+ for(int i = 0; i < trackable_data.Count; i++)
+ {
+ var td = trackable_data[i];
+ Debug.Assert(td.node_id == i);
+ TrackableObjectGraph.Types.TrackableObject trackable_object = new();
+ trackable_object.SlotVariables.AddRange(td.slot_variable_proto);
+ trackable_object.Children.AddRange(td.children_proto);
+ object_graph_proto.Nodes.Add(trackable_object);
+ }
+ return object_graph_proto;
+ }
+
+ ///
+ /// Creates dictionary of tensors to checkpoint, and updates the proto.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ private static IDictionary>>> get_and_write_tensors_to_serialize(IList tensor_trackables, IDictionary node_ids,
+ bool call_with_mapped_captures, object? cache, TrackableObjectGraph object_graph_proto)
+ {
+ Dictionary>>> serialized_tensors = new();
+ foreach(var td in tensor_trackables)
+ {
+ // TODO: deal with cache.
+ var legacy_name = SaveableCompat.get_saveable_name(td.object_to_save) ?? "";
+ Trackable trackable = null;
+ IDictionary>> tensor_dict;
+ if(!saveable_object_util.trackable_has_serialize_to_tensor(td.object_to_save) || legacy_name.Length > 0)
+ {
+ (trackable, tensor_dict) = get_tensors_from_legacy_saveable(td, node_ids, call_with_mapped_captures, object_graph_proto);
+ }
+ else
+ {
+ tensor_dict = get_tensors_from_trackable(td, call_with_mapped_captures, object_graph_proto);
+ trackable = td.object_to_save;
+ }
+ if(trackable is not null)
+ {
+ serialized_tensors[trackable] = tensor_dict;
+ }
+ else
+ {
+ serialized_tensors[Trackable.None] = tensor_dict;
+ }
+ }
+ return serialized_tensors;
+ }
+
+ private static IDictionary>> get_tensors_from_trackable(TrackableData trackable_data, bool call_with_mapped_captures, TrackableObjectGraph object_graph_proto)
+ {
+ var trackable = trackable_data.object_to_save;
+
+ // TODO: complete it. Note that actually `call_with_mapped_captures` is of function type.
+ IDictionary>> ret_tensor_dict;
+ if (call_with_mapped_captures)
+ {
+ throw new NotImplementedException();
+ }
+ else
+ {
+ ret_tensor_dict = trackable.serialize_to_tensors();
+ }
+
+ Dictionary>> tensor_dict = new();
+ foreach(var pair in ret_tensor_dict)
+ {
+ var local_name = TrackableUtils.escape_local_name(pair.Key);
+ var maybe_tensor = pair.Value;
+ var checkpoint_key = TrackableUtils.checkpoint_key(trackable_data.object_name, local_name);
+
+ tensor_dict[checkpoint_key] = maybe_tensor;
+
+ foreach(var key in maybe_tensor.Keys)
+ {
+ if (maybe_tensor[key].IsTypeOrDeriveFrom())
+ {
+ maybe_tensor[key].AsT1.name = local_name + maybe_tensor[key].AsT1.name;
+ }
+ }
+
+ if(object_graph_proto is not null)
+ {
+ object_graph_proto.Nodes[trackable_data.node_id].Attributes.Add(new TrackableObjectGraph.Types.TrackableObject.Types.SerializedTensor()
+ {
+ Name = local_name,
+ CheckpointKey = checkpoint_key,
+ FullName = CheckPointUtils.get_full_name(trackable)
+ });
+ }
+ }
+ return tensor_dict;
+ }
+
+ ///
+ /// Gets tensors to serialize from a Trackable with legacy SaveableObjects.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ private static (Trackable, IDictionary>>) get_tensors_from_legacy_saveable(TrackableData trackable_data, IDictionary node_ids,
+ bool call_with_mapped_captures, TrackableObjectGraph object_graph_proto)
+ {
+ Dictionary object_names = new();
+ object_names[trackable_data.trackable] = trackable_data.object_name;
+ Dictionary object_map = new();
+ object_map[trackable_data.trackable] = trackable_data.object_to_save;
+
+ var (checkpoint_factory_map, _) = SaveUtilV1.get_checkpoint_factories_and_keys(object_names, object_map);
+ var (named_saveable_objects, _) = SaveUtilV1.generate_saveable_objects(checkpoint_factory_map, object_graph_proto, node_ids, object_map,
+ call_with_mapped_captures, saveables_cache: null);
+ var trackable = new SaveableCompatibilityConverter(trackable_data.object_to_save, named_saveable_objects);
+ return (trackable, trackable.serialize_to_tensors());
+ }
+
+ private static IDictionary> get_and_write_registered_savers(IDictionary> registered_trackables, TrackableObjectGraph object_graph_proto)
+ {
+ Dictionary> registered_savers = new();
+ foreach(var pair in registered_trackables)
+ {
+ foreach(var td in pair.Value)
+ {
+ // Fixed inverted condition: the inner map must be created when the
+ // key is absent; the previous branching overwrote existing entries
+ // and threw KeyNotFoundException on the first insertion.
+ if (!registered_savers.ContainsKey(pair.Key))
+ {
+ registered_savers[pair.Key] = new Dictionary();
+ }
+ registered_savers[pair.Key][td.object_name] = td.object_to_save;
+
+ var object_proto = object_graph_proto.Nodes[td.node_id];
+ // TODO: add APIs and complete it. Now the `TrackableObjectGraph.Types.TrackableObject` lacks `registered_savers`.
+ }
+ }
+ return registered_savers;
+ }
+
+ private static (IList, IList, IDictionary>) split_trackables(IEnumerable trackable_data)
+ {
+ List tensor_trackables = new();
+ List py_state_trackables = new(); // skip the process of `PyState` for the lack of API. This is only a pleceholder.
+ Dictionary> registered_trackables = new();
+
+ foreach(var td in trackable_data)
+ {
+ // TODO: deal with registration.
+ tensor_trackables.Add(td);
+ }
+ return (tensor_trackables, py_state_trackables, registered_trackables);
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Checkpoint/SaveUtilV1.cs b/src/TensorFlowNET.Core/Checkpoint/SaveUtilV1.cs
new file mode 100644
index 000000000..9280179c0
--- /dev/null
+++ b/src/TensorFlowNET.Core/Checkpoint/SaveUtilV1.cs
@@ -0,0 +1,225 @@
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Linq;
+using Tensorflow.Exceptions;
+using Tensorflow.Train;
+using Tensorflow.Training;
+using pbc = global::Google.Protobuf.Collections;
+using static Tensorflow.Binding;
+using Google.Protobuf;
+using OneOf;
+
+namespace Tensorflow.Checkpoint;
+
+public static class SaveUtilV1
+{
+ public static (IDictionary>, object?) get_checkpoint_factories_and_keys(IDictionary object_names,
+ IDictionary? object_map = null)
+ {
+ // According to https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/registration/README.md,
+ // till now only internal registrations are allowed. So, we won't return a saver in this function.
+ // The implementation of this function should be updated if tensorflow update it.
+ Dictionary> checkpoint_factory_map = new();
+ foreach (var pair in object_names)
+ {
+ var trackable = pair.Key;
+ var object_name = pair.Value;
+ var object_to_save = CheckPointUtils.get_mapped_trackable(trackable, object_map);
+
+ // skip the registration process.
+
+ List current_list = new();
+ foreach (var name_and_factory in saveable_object_util.saveable_objects_from_trackable(object_to_save))
+ {
+ // treat name as key_suffix.
+ var name = name_and_factory.Key;
+ var checkpoint_key = TrackableUtils.checkpoint_key(object_name, name);
+
+ current_list.Add(new CheckpointFactoryData(name_and_factory.Value, name, checkpoint_key));
+ }
+
+ checkpoint_factory_map[trackable] = current_list;
+ }
+
+ return (checkpoint_factory_map, null);
+ }
+
+ public static (IList, IDictionary>?) frozen_saveables_and_savers(ObjectGraphView graph_view,
+ IDictionary object_map, Graph? to_graph, bool call_with_mapped_captures,
+ object? saveables_cache = null)
+ {
+ if (to_graph is not null)
+ {
+ var g = to_graph.as_default();
+ var (named_saveable_objects, graph_proto, _, registered_savers) = serialize_gathered_objects(graph_view,
+ object_map, call_with_mapped_captures, saveables_cache);
+ var object_graph_tensor = tf_with(ops.device("/cpu:0"), _ =>
+ {
+ // TODO(Rinne): locate the error that causes transferring TF_STRING to this function throws an exception.
+ return constant_op.constant(graph_proto.ToByteArray());
+ });
+ named_saveable_objects.Add(new NoRestoreSaveable(object_graph_tensor, Trackable.Constants.OBJECT_GRAPH_PROTO_KEY));
+ g.Exit();
+ return (named_saveable_objects, registered_savers);
+ }
+ else
+ {
+ using (new ops.NullContextManager())
+ {
+ var (named_saveable_objects, graph_proto, _, registered_savers) = serialize_gathered_objects(graph_view,
+ object_map, call_with_mapped_captures, saveables_cache);
+ var object_graph_tensor = tf_with(ops.device("/cpu:0"), _ =>
+ {
+ return constant_op.constant(graph_proto.ToString());
+ });
+ named_saveable_objects.Add(new NoRestoreSaveable(object_graph_tensor, Trackable.Constants.OBJECT_GRAPH_PROTO_KEY));
+ return (named_saveable_objects, registered_savers);
+ }
+ }
+ }
+
+ public static (IList, TrackableObjectGraph, object?, IDictionary>?) serialize_gathered_objects(ObjectGraphView graph_view,
+ IDictionary object_map, bool call_with_mapped_captures, object? saveables_cache = null)
+ {
+ var (trackable_objects, node_paths) = graph_view.breadth_first_traversal();
+ Dictionary object_names = new();
+ foreach (var pair in node_paths)
+ {
+ object_names[pair.Key] = TrackableUtils.object_path_to_string(pair.Value);
+ }
+
+ Dictionary node_ids = new();
+ for (int i = 0; i < trackable_objects.Count; i++)
+ {
+ node_ids[trackable_objects[i]] = i;
+ }
+
+ var slot_variables = CheckPointUtils.serialize_slot_variables(trackable_objects, node_ids, object_names);
+ var object_graph_proto = fill_object_graph_proto(graph_view, trackable_objects, node_ids, slot_variables);
+ var (named_saveable_objects, feed_additions, registered_savers) = add_attributes_to_object_graph(
+ trackable_objects, object_graph_proto, node_ids, object_names, object_map, call_with_mapped_captures,
+ saveables_cache);
+
+ CheckPointUtils.add_checkpoint_values_check(object_graph_proto);
+ return (named_saveable_objects, object_graph_proto, feed_additions, registered_savers);
+ }
+
+ private static TrackableObjectGraph fill_object_graph_proto(ObjectGraphView graph_view, IList trackable_objects,
+ IDictionary node_ids,
+ IDictionary>
+ slot_variables)
+ {
+ TrackableObjectGraph object_graph_proto = new();
+ for (int i = 0; i < trackable_objects.Count; i++)
+ {
+ var trackable = trackable_objects[i];
+ Debug.Assert(node_ids[trackable] == i);
+ var object_proto = new TrackableObjectGraph.Types.TrackableObject();
+ if (slot_variables.TryGetValue(trackable, out var slots))
+ {
+ object_proto.SlotVariables.AddRange(slots);
+ }
+ object_graph_proto.Nodes.Add(object_proto);
+ foreach (var child in graph_view.list_children(trackable))
+ {
+ object_proto.Children.Add(new TrackableObjectGraph.Types.TrackableObject.Types.ObjectReference()
+ { NodeId = node_ids[child.Refer], LocalName = child.Name });
+ }
+ }
+
+ return object_graph_proto;
+ }
+
+ private static (IList, object?, IDictionary>?) add_attributes_to_object_graph(
+ IList trackable_objects,
+ TrackableObjectGraph object_graph_proto, IDictionary node_ids,
+ IDictionary object_names, IDictionary object_map,
+ bool call_with_mapped_captures, object? saveables_cache = null)
+ {
+ int cnt = Math.Min(trackable_objects.Count, object_graph_proto.Nodes.Count);
+ for (int i = 0; i < cnt; i++)
+ {
+ Debug.Assert(node_ids[trackable_objects[i]] == i);
+ }
+
+ var (checkpoint_factory_map, unmmaped_registered_savers) =
+ get_checkpoint_factories_and_keys(object_names, object_map);
+
+ // skip the process of registered savers
+
+ var (named_saveable_objects, feed_additions) = generate_saveable_objects(checkpoint_factory_map,
+ object_graph_proto, node_ids, object_map, call_with_mapped_captures, saveables_cache);
+ return (named_saveable_objects, feed_additions, null);
+ }
+
+ public static (IList, object?) generate_saveable_objects(
+ IDictionary> checkpoint_factory_map,
+ TrackableObjectGraph? object_graph_proto, IDictionary? node_ids,
+ IDictionary object_map, bool call_with_mapped_captures, object? saveables_cache = null)
+ {
+ List named_saveable_objects = new();
+ foreach (var pair in checkpoint_factory_map)
+ {
+ var trackable = pair.Key;
+ var factory_data_list = pair.Value;
+ bool fill_object_proto = object_graph_proto is not null && node_ids is not null;
+ TrackableObjectGraph.Types.TrackableObject object_proto = null!;
+ if (fill_object_proto)
+ {
+ object_proto = object_graph_proto.Nodes[node_ids[trackable]];
+ }
+
+ var object_to_save = CheckPointUtils.get_mapped_trackable(trackable, object_map);
+ // skip cache
+
+ foreach (var factory_data in factory_data_list)
+ {
+ var name = factory_data.name;
+ var key = factory_data.checkpoint_key;
+ var maybe_saveable = saveable_object_util.create_saveable_object(name, key, factory_data.factory);
+
+ // TODO: tensorflow python has a process with callable `saveable_factory`.
+ List saveables = new();
+ if (maybe_saveable.TryPickT1(out var s, out var variable))
+ {
+ saveables.Add(s);
+ }
+ else
+ {
+ saveables.AddRange(saveable_object_util.saveable_objects_for_op(variable as Trackable, key));
+ }
+
+ foreach (var saveable in saveables)
+ {
+ if (!saveable.name.Contains(key))
+ {
+ throw new AssertionError($"The object {trackable} produced a SaveableObject with name " +
+ $"'{saveable.name}' for attribute '{name}'. Expected a name" +
+ $" containing '{key}'.");
+ }
+ }
+
+ // skip the process of PythonState
+
+ named_saveable_objects.AddRange(saveables);
+
+ if(!fill_object_proto) continue;
+
+ // skip the process of `TrackableSaveable` because of lack of APIs.
+
+ object_proto!.Attributes.Add(new TrackableObjectGraph.Types.TrackableObject.Types.SerializedTensor()
+ { Name = name, CheckpointKey = key, FullName = CheckPointUtils.get_full_name(object_to_save) });
+ }
+ }
+
+ return (named_saveable_objects, null);
+ }
+}
+
+public record class CheckpointFactoryData
+(
+ Func> factory,
+ string name,
+ string checkpoint_key
+);
diff --git a/src/TensorFlowNET.Core/Checkpoint/SaveableCompat.cs b/src/TensorFlowNET.Core/Checkpoint/SaveableCompat.cs
new file mode 100644
index 000000000..fa441d799
--- /dev/null
+++ b/src/TensorFlowNET.Core/Checkpoint/SaveableCompat.cs
@@ -0,0 +1,16 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Train;
+
+namespace Tensorflow.Checkpoint
+{
+ internal static class SaveableCompat
+ {
+ public static string? get_saveable_name(Trackable cls_or_obj)
+ {
+ // TODO: implement it with Attribute.
+ return null;
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/Checkpoint/TrackableView.cs b/src/TensorFlowNET.Core/Checkpoint/TrackableView.cs
new file mode 100644
index 000000000..dab6d5d97
--- /dev/null
+++ b/src/TensorFlowNET.Core/Checkpoint/TrackableView.cs
@@ -0,0 +1,82 @@
+using System;
+using Tensorflow.Train;
+using System.Collections.Generic;
+using System.IO;
+using Tensorflow.Keras.Saving.SavedModel;
+
+namespace Tensorflow.Checkpoint;
+
+public class TrackableView
+{
+ protected WeakReference<Trackable> _root_ref;
+ public TrackableView(Trackable obj)
+ {
+ _root_ref = new WeakReference<Trackable>(obj);
+ }
+
+ public TrackableView(WeakReference<Trackable> obj)
+ {
+ _root_ref = obj;
+ }
+
+ public virtual IDictionary<string, Trackable> children(Trackable obj, SaveType save_type = SaveType.CHECKPOINT, IDictionary<string, IDictionary<Trackable, ISerializedAttributes>>? cache = null)
+ {
+ obj._maybe_initialize_trackable();
+ Dictionary<string, Trackable> children = new();
+ // Note: in python the return type of `Trackable._trackable_children` is not fixed.
+ // Therefore it uses `convert_to_trackable` to have an extra process.
+ foreach (var pair in obj._trackable_children(save_type, cache))
+ {
+ children[pair.Key] = pair.Value;
+ }
+ return children;
+ }
+
+ public Trackable Root
+ {
+ get
+ {
+ if (_root_ref.TryGetTarget(out Trackable res))
+ {
+ return res;
+ }
+ else
+ {
+ throw new InvalidDataException(
+ "Cannot get the object from the weak reference. Please consider if a null reference is passed to the constructor.");
+ }
+ }
+ }
+
+ /// <summary>
+ /// Returns a list of all nodes and their paths from self.root using a breadth-first traversal.
+ /// Corresponding to tensorflow/python/checkpoint/trackable_view.Trackable._descendants_with_paths
+ /// </summary>
+ protected (IList<Trackable>, IDictionary<Trackable, IList<TrackableReference>>) _descendants_with_paths()
+ {
+ List<Trackable> bfs_sorted = new();
+ Queue<Trackable> to_visit = new();
+ to_visit.Enqueue(Root);
+ Dictionary<Trackable, IList<TrackableReference>> node_paths = new();
+ node_paths[this.Root] = new List<TrackableReference>();
+ while (!to_visit.empty())
+ {
+ var current_trackable = to_visit.Dequeue();
+ bfs_sorted.Add(current_trackable);
+ var children_dict = this.children(current_trackable);
+ foreach (var name in children_dict.Keys)
+ {
+ var dependency = children_dict[name];
+ if (!node_paths.ContainsKey(dependency))
+ {
+ var list = new List<TrackableReference>(node_paths[current_trackable]);
+ list.Add(new TrackableReference(name, dependency));
+ node_paths[dependency] = list;
+ to_visit.Enqueue(dependency);
+ }
+ }
+ }
+
+ return (bfs_sorted, node_paths);
+ }
+}
\ No newline at end of file
diff --git a/src/TensorFlowNET.Core/Checkpoint/c_api.checkpoint.cs b/src/TensorFlowNET.Core/Checkpoint/c_api.checkpoint.cs
new file mode 100644
index 000000000..f956e3337
--- /dev/null
+++ b/src/TensorFlowNET.Core/Checkpoint/c_api.checkpoint.cs
@@ -0,0 +1,27 @@
+using System.Runtime.InteropServices;
+using Tensorflow.Checkpoint;
+
+namespace Tensorflow
+{
+ public unsafe partial class c_api
+ {
+ [DllImport(TensorFlowLibName)]
+ internal static extern SafeCheckpointReaderHandle TF_NewCheckpointReader(string filename, SafeStatusHandle status);
+ [DllImport(TensorFlowLibName)]
+ internal static extern void TF_DeleteCheckpointReader(IntPtr reader);
+ [DllImport(TensorFlowLibName)]
+ internal static extern int TF_CheckpointReaderHasTensor(SafeCheckpointReaderHandle reader, string name);
+ [DllImport(TensorFlowLibName)]
+ internal static extern IntPtr TF_CheckpointReaderGetVariable(SafeCheckpointReaderHandle reader, int index);
+ [DllImport(TensorFlowLibName)]
+ internal static extern int TF_CheckpointReaderSize(SafeCheckpointReaderHandle reader);
+ [DllImport(TensorFlowLibName)]
+ internal static extern TF_DataType TF_CheckpointReaderGetVariableDataType(SafeCheckpointReaderHandle reader, string name);
+ [DllImport(TensorFlowLibName)]
+ internal static extern void TF_CheckpointReaderGetVariableShape(SafeCheckpointReaderHandle reader, string name, long[] dims, int num_dims, SafeStatusHandle status);
+ [DllImport(TensorFlowLibName)]
+ internal static extern int TF_CheckpointReaderGetVariableNumDims(SafeCheckpointReaderHandle reader, string name);
+ [DllImport(TensorFlowLibName)]
+ internal static extern SafeTensorHandle TF_CheckpointReaderGetTensor(SafeCheckpointReaderHandle reader, string name, SafeStatusHandle status);
+ }
+}
diff --git a/src/TensorFlowNET.Core/Checkpoint/checkpoint.cs b/src/TensorFlowNET.Core/Checkpoint/checkpoint.cs
new file mode 100644
index 000000000..30d45e82c
--- /dev/null
+++ b/src/TensorFlowNET.Core/Checkpoint/checkpoint.cs
@@ -0,0 +1,582 @@
+using Google.Protobuf;
+using System;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Linq;
+using Tensorflow.Contexts;
+using Tensorflow.Eager;
+using Tensorflow.Train;
+using Tensorflow.Exceptions;
+using static Tensorflow.TrackableObjectGraph.Types.TrackableObject.Types;
+using static Tensorflow.Binding;
+using Tensorflow.Operations;
+using Newtonsoft.Json;
+using Tensorflow.Training;
+using OneOf;
+
+namespace Tensorflow.Checkpoint;
+
+///
+/// Saves and restores a `Trackable` object and its dependencies.
+///
+public class TrackableSaver
+{
+ private ObjectGraphView _graph_view;
+ private Tensor _cached_save_operation;
+ private TrackableObjectGraph _last_save_object_graph;
+ private Tensor? _object_graph_feed_tensor = null;
+ private Tensor? _file_prefix_feed_tensor = null;
+ private Tensor? _file_prefix_placeholder = null;
+ private Dictionary? _object_map = null;
+ private object? _cache = null;
+ public Tensor? FilePrefixPlaceHolder
+ {
+ get
+ {
+ return _file_prefix_placeholder;
+ }
+ set
+ {
+ _file_prefix_placeholder = value;
+ }
+ }
+ public TrackableSaver(ObjectGraphView graph_view)
+ {
+ _graph_view = graph_view;
+
+ // TODO: cache when not executing eagerly.
+ // including `_cache`, `_file_prefix_feed_tensor`, `_file_prefix_placeholder`
+ // `_object_graph_feed_tensor`, `_object_map`, `_restore_op_cache`, `_saveables_cache`
+
+ }
+
+ private (IDictionary>>>, IDictionary, IDictionary>, TrackableObjectGraph)
+ gather_serialized_tensors(Tensor? object_graph_tensor = null)
+ {
+ var (serialized_tensors, feed_additions, registered_savers, graph_proto) = SaveUtil.serialize_graph_view(_graph_view, _object_map, cache:_cache);
+
+ // TODO: cache.
+
+ if(object_graph_tensor is null)
+ {
+ tf_with(ops.device("/cpu:0"), _ =>
+ {
+ object_graph_tensor = constant_op.constant(graph_proto.ToByteArray());
+ });
+ }
+ else
+ {
+ feed_additions[object_graph_tensor] = graph_proto.ToByteArray();
+ }
+ Debug.Assert(!serialized_tensors.ContainsKey(Trackable.None) || !serialized_tensors[Trackable.None].ContainsKey(Trackable.Constants.OBJECT_GRAPH_PROTO_KEY));
+ if (!serialized_tensors.ContainsKey(Trackable.None))
+ {
+ serialized_tensors[Trackable.None] = new Dictionary>>();
+ }
+ serialized_tensors[Trackable.None][Trackable.Constants.OBJECT_GRAPH_PROTO_KEY] = new Dictionary>();
+ serialized_tensors[Trackable.None][Trackable.Constants.OBJECT_GRAPH_PROTO_KEY].Add(saveable_object_util.NO_SLICE_SPEC_KEY, object_graph_tensor);
+ return (serialized_tensors, feed_additions, registered_savers, graph_proto);
+ }
+
+ private (Tensor, IDictionary<Tensor, object>) save_cached_when_graph_building(Tensor file_prefix, Tensor object_graph_tensor, CheckpointOptions options)
+ {
+ var (serialized_tensors, feed_additions, registered_savers, graph_proto) = gather_serialized_tensors(object_graph_tensor);
+
+ Func<(Tensor, IDictionary<Tensor, object>)> run_save = () =>
+ {
+ if (_last_save_object_graph != graph_proto || tf.Context.executing_eagerly() || ops.inside_function())
+ {
+ var saver = new MultiDeviceSaver(serialized_tensors, registered_savers);
+ var save_op = saver.save(file_prefix, options);
+
+ // tensorflow python: `with ops.device("/cpu:0"):`
+ using (ops.control_dependencies(new object[] { save_op }))
+ {
+ _cached_save_operation = array_ops.identity(file_prefix);
+ }
+ _last_save_object_graph = graph_proto;
+ }
+ return (_cached_save_operation, feed_additions);
+ };
+
+ if (options.experimental_enable_async_checkpoint)
+ {
+ throw new NotImplementedException();
+ }
+
+ return run_save();
+ }
+
+ private (Tensor, IDictionary<Tensor, object>) save_cached_when_graph_building(string file_prefix, Tensor object_graph_tensor, CheckpointOptions options)
+ {
+ var (serialized_tensors, feed_additions, registered_savers, graph_proto) = gather_serialized_tensors(object_graph_tensor);
+
+ Func<(Tensor, IDictionary<Tensor, object>)> run_save = () =>
+ {
+ if (_last_save_object_graph != graph_proto || tf.Context.executing_eagerly() || ops.inside_function())
+ {
+ var saver = new MultiDeviceSaver(serialized_tensors, registered_savers);
+ var save_op = saver.save(file_prefix, options);
+
+ // tensorflow python: `with ops.device("/cpu:0"):`
+ using (ops.control_dependencies(new object[] {save_op} ))
+ {
+ _cached_save_operation = array_ops.identity(tf.constant(file_prefix));
+ }
+ _last_save_object_graph = graph_proto;
+ }
+ return (_cached_save_operation, feed_additions);
+ };
+
+ if (options.experimental_enable_async_checkpoint)
+ {
+ throw new NotImplementedException();
+ }
+
+ return run_save();
+ }
+
+ // TODO: parameter write_done_callback
+ public Tensor save(string file_prefix, int? checkpoint_number = null, Session? session = null,
+ CheckpointOptions? options = null)
+ {
+ if (options is null)
+ {
+ options = new CheckpointOptions();
+ }
+
+ Dictionary feed_dict = new();
+ bool use_session = (!tf.Context.executing_eagerly() && !ops.inside_function());
+ if (checkpoint_number is not null)
+ {
+ file_prefix = $"{file_prefix}-{checkpoint_number?.ToString()}";
+ }
+
+ Tensor file_prefix_tensor;
+ Tensor object_graph_tensor;
+ string file_prefix_to_save;
+ if (use_session)
+ {
+ if (_object_graph_feed_tensor is null)
+ {
+ // In python there is `with ops.device("/cpu:0")`.
+ _object_graph_feed_tensor = constant_op.constant("", TF_DataType.TF_STRING);
+ _file_prefix_feed_tensor = constant_op.constant("", TF_DataType.TF_STRING);
+ }
+
+ object_graph_tensor = _object_graph_feed_tensor;
+ file_prefix_tensor = _file_prefix_feed_tensor;
+ feed_dict[file_prefix_tensor] = file_prefix;
+ file_prefix_to_save = "";
+ }
+ else
+ {
+ // In python there is `with ops.device("/cpu:0")`.
+ file_prefix_tensor = ops.convert_to_tensor(file_prefix, TF_DataType.TF_STRING);
+ object_graph_tensor = null;
+ file_prefix_to_save = file_prefix;
+ }
+
+ var (save_path, new_feed_additions) =
+ save_cached_when_graph_building(file_prefix_to_save, object_graph_tensor, options);
+
+ if (new_feed_additions is not null)
+ {
+ foreach (var pair in new_feed_additions)
+ {
+ feed_dict.Add(pair.Key, pair.Value);
+ }
+ }
+ if(!use_session)
+ {
+ session = null;
+ }
+ else if (session is null)
+ {
+ session = new Session(); // In python it uses `get_session`.
+ }
+
+ if (session is not null)
+ {
+ var s = feed_dict.Select(x => new FeedItem(x.Key, x.Value)).ToArray();
+ return session.run((Tensor)save_path, s);
+ }
+ else if (use_session)
+ {
+ throw new RuntimeError($"Unable to save checkpoint to \"{file_prefix}\" " +
+ "in graph mode without a default session. Please use " +
+ "`with tf.Session():` to create a session.");
+ }
+ else
+ {
+ return save_path;
+ }
+ }
+
+ public LoadStatus restore(string? save_path, CheckpointOptions? options = null)
+ {
+ if (options is null)
+ {
+ options = new CheckpointOptions();
+ }
+ if(save_path is null)
+ {
+ return new InitializationOnlyStatus(_graph_view, ops.uid());
+ }
+
+ CheckpointReader reader = new CheckpointReader(save_path);
+ bool graph_building = tf.Context.executing_eagerly();
+ Dictionary<string, TF_DataType> dtype_map = null;
+ if (!graph_building)
+ {
+ dtype_map = reader.VariableToDataTypeMap;
+ }
+ Tensor object_graph_string = reader.GetTensor(Trackable.Constants.OBJECT_GRAPH_PROTO_KEY, dtype: TF_DataType.TF_STRING);
+
+ Dictionary<Tensor, string> file_prefix_feed_dict;
+ Tensor file_prefix_tensor = null;
+ if (graph_building)
+ {
+ if(_file_prefix_placeholder is null)
+ {
+ _file_prefix_placeholder = tf_with(ops.device("/cpu:0"), _ =>
+ {
+ return constant_op.constant("model");
+ });
+ }
+ file_prefix_tensor = _file_prefix_placeholder;
+ file_prefix_feed_dict = new();
+ file_prefix_feed_dict[_file_prefix_placeholder] = save_path;
+ }
+ else
+ {
+ file_prefix_tensor = tf_with(ops.device("/cpu:0"), _ =>
+ {
+ return constant_op.constant(save_path);
+ });
+ file_prefix_feed_dict = null;
+ }
+ TrackableObjectGraph object_graph_proto = new();
+ if(object_graph_string.ndim > 0)
+ {
+ object_graph_proto.MergeFrom(object_graph_string.BufferToArray());
+ }
+ else
+ {
+ object_graph_proto.MergeFrom(object_graph_string.StringBytes()[0]);
+ }
+ CheckpointRestoreCoordinator checkpoint = new CheckpointRestoreCoordinator(
+ object_graph_proto: object_graph_proto,
+ save_path: save_path,
+ save_path_tensor: file_prefix_tensor,
+ reader: reader,
+ restore_op_cache: null,
+ graph_view: _graph_view,
+ options: options,
+ saveables_cache: null
+ );
+
+ new CheckpointPosition(checkpoint, 0).restore(_graph_view.Root);
+
+ if(_graph_view.AttachedDependencies is not null)
+ {
+ foreach(var refer in _graph_view.AttachedDependencies)
+ {
+ if(refer.Name == "root")
+ {
+ continue;
+ }
+ int? proto_id = null;
+ // Find proto ID of attached dependency (if it is in the proto).
+ foreach (var proto_refer in object_graph_proto.Nodes[0].Children)
+ {
+ if(proto_refer.LocalName == refer.Name)
+ {
+ proto_id = proto_refer.NodeId;
+ break;
+ }
+ }
+
+ if (proto_id is null)
+ {
+ continue;
+ }
+
+ // Object has already been restored. This can happen when there's an
+ // indirect connection from the attached object to the root.
+ if (checkpoint.ObjectByProtoId.ContainsKey(proto_id.Value))
+ {
+ continue;
+ }
+
+ new CheckpointPosition(checkpoint, proto_id.Value).restore(refer.Refer);
+ }
+ }
+
+ return new CheckpointLoadStatus(checkpoint, file_prefix_feed_dict, _graph_view);
+ }
+}
+
+public class CheckpointRestoreCoordinator
+{
+ private CheckpointOptions _options;
+ private TrackableObjectGraph _object_graph_proto;
+ private int _restore_uid;
+ private HashSet<int> _matched_proto_ids;
+ private Tensor _save_path_tensor;
+ private string _save_path_string;
+ private CheckpointReader _reader;
+ private Dictionary<string, TF_DataType> _dtype_map;
+ private Dictionary<string, Shape> _shape_map;
+ private ObjectGraphView _graph_view;
+ private Dictionary> _slot_restorations;
+ private bool _expect_partial_attr;
+ private List<Operation> _restore_ops;
+ private List<Trackable> _all_trackables;
+ private Dictionary<int, Trackable> _object_by_proto_id;
+ private Dictionary<string, Operation> _restore_ops_by_name;
+ private Dictionary> _deferred_slot_restorations;
+ private Dictionary> _unused_attributes;
+
+ public CheckpointRestoreCoordinator(TrackableObjectGraph object_graph_proto, string save_path, Tensor save_path_tensor,
+ CheckpointReader reader, object? restore_op_cache, ObjectGraphView graph_view, CheckpointOptions options, object? saveables_cache)
+ {
+ // TODO(Rinne): cache.
+ _options = options;
+ _object_graph_proto = object_graph_proto;
+ _restore_uid = ops.uid();
+ _save_path_tensor = save_path_tensor;
+ _save_path_string = save_path;
+ _reader = reader;
+ if(_reader is null)
+ {
+ _reader = new CheckpointReader(save_path);
+ }
+ _dtype_map = _reader.VariableToDataTypeMap;
+ _shape_map = _reader.VariableToShapeMap;
+ _graph_view = graph_view;
+ _restore_ops = new List<Operation>();
+ _restore_ops_by_name = new Dictionary<string, Operation>();
+ _all_trackables = new List<Trackable>();
+ _matched_proto_ids = new HashSet<int>();
+ _object_by_proto_id = new Dictionary<int, Trackable>();
+ _slot_restorations = new Dictionary