diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 000000000..176a458f9
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1 @@
+* text=auto
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 000000000..1f011157e
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,12 @@
+# These are supported funding model platforms
+
+github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
+patreon: # Replace with a single Patreon username
+open_collective: # Replace with a single Open Collective username
+ko_fi: # Replace with a single Ko-fi username
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: # Replace with a single Liberapay username
+issuehunt: # Replace with a single IssueHunt username
+otechie: # Replace with a single Otechie username
+custom: [] # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
diff --git a/.github/ISSUE_TEMPLATE/blank_issue.yml b/.github/ISSUE_TEMPLATE/blank_issue.yml
new file mode 100644
index 000000000..bbd855958
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/blank_issue.yml
@@ -0,0 +1,12 @@
+name: Blank Issue
+description: Submit an issue about Tensorflow.NET.
+labels: [Blank Issue]
+body:
+ - type: textarea
+ id: description
+ attributes:
+ label: Description
+ description: Please describe the issue here.
+ placeholder: Description
+ validations:
+ required: false
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 000000000..14e237951
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,48 @@
+name: BUG Report
+description: Report a BUG of Tensorflow.NET.
+title: "[BUG Report]: "
+labels: [bug-report]
+body:
+ - type: markdown
+ attributes:
+ value: |
+        We welcome bug reports! Any unexpected behavior could be a BUG and this template helps us gather the information needed to fix it.
+ - type: textarea
+ id: background
+ attributes:
+ label: Description
+ description: Please share a clear and concise description of the problem.
+ placeholder: Description
+ validations:
+ required: true
+ - type: textarea
+ id: repro-steps
+ attributes:
+ label: Reproduction Steps
+ description: |
+        Please include minimal steps to reproduce the problem if possible. E.g.: the smallest possible code snippet; or a small project, with steps to run it. It will greatly help us to locate the cause of the problem.
+ placeholder: Minimal Reproduction
+ validations:
+ required: false
+ - type: textarea
+ id: known-workarounds
+ attributes:
+ label: Known Workarounds
+ description: |
+ Please provide a description of any known workarounds.
+ placeholder: Known Workarounds
+ validations:
+ required: false
+ - type: textarea
+ id: configuration
+ attributes:
+ label: Configuration and Other Information
+ description: |
+ Please provide more information on your configuration:
+ * Which version of Tensorflow.NET is the code depending on?
+ * Which version of .NET runtime is the code running on?
+ * What is the OS?
+ * Any other information about this problem?
+ placeholder: Configuration
+ validations:
+ required: false
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/documention_issue.yml b/.github/ISSUE_TEMPLATE/documention_issue.yml
new file mode 100644
index 000000000..f8a04e40f
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/documention_issue.yml
@@ -0,0 +1,30 @@
+name: Documentation Issue
+description: Report an issue about the Tensorflow.NET documentation or request new documentation.
+title: "[Documentation Issue]: "
+labels: [Documentation Issue]
+body:
+ - type: markdown
+ attributes:
+ value: |
+        We welcome suggestions about the Tensorflow.NET documentation! This template will help us gather the information we need to improve it.
+ - type: textarea
+ id: brief-description
+ attributes:
+ label: Brief Description
+      description: Please describe the problem or the request for new documentation here.
+ placeholder: Description
+ validations:
+ required: true
+ - type: textarea
+ id: alternatives
+ attributes:
+ label: Alternatives
+ description: |
+ Please provide some alternative information here, if any.
+ placeholder: Alternatives
+ validations:
+ required: false
+ - type: markdown
+ attributes:
+ value: |
+        Thanks for your contribution!
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 000000000..9ce3f1663
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,50 @@
+name: Feature Request
+description: Request/Propose a new feature of Tensorflow.NET.
+title: "[Feature Request]: "
+labels: [feature-request]
+body:
+ - type: markdown
+ attributes:
+ value: |
+        We welcome feature proposals/requests! This template will help us gather the information we need to implement the new feature.
+ - type: textarea
+ id: background
+ attributes:
+ label: Background and Feature Description
+ description: Please describe the purpose and value of the new feature here. If the feature is linked to a specific problem, please describe it or put the link here.
+ placeholder: Purpose
+ validations:
+ required: true
+ - type: textarea
+ id: api-proposal
+ attributes:
+ label: API Definition and Usage
+ description: |
+ Please tell us the new API related to the requested feature, if any.
+ placeholder: API declaration (no method bodies)
+ value: |
+ ```cs
+ public Tensor NewFunc(Tensor x, int y);
+
+ var result = NewFunc(input, index);
+ ```
+ validations:
+ required: false
+ - type: textarea
+ id: alternatives
+ attributes:
+ label: Alternatives
+ description: |
+ Please provide some alternative information of the feature, if any. For example, if you request a feature which depends on a specific device, please provide the device information.
+ placeholder: Alternatives
+ validations:
+ required: false
+ - type: textarea
+ id: risks
+ attributes:
+ label: Risks
+ description: |
+ Please mention any risks that to your knowledge the API proposal might entail, such as breaking changes, performance regressions, etc.
+ placeholder: Risks
+ validations:
+ required: false
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/performance_issue.yml b/.github/ISSUE_TEMPLATE/performance_issue.yml
new file mode 100644
index 000000000..cbe86d329
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/performance_issue.yml
@@ -0,0 +1,48 @@
+name: Performance Issue
+description: Submit an issue about performance problem or regression of Tensorflow.NET.
+title: "[Performance Issue]: "
+labels: [Performance Issue]
+body:
+ - type: markdown
+ attributes:
+ value: |
+        We welcome issues about Tensorflow.NET performance! This template will help us gather the information we need to locate the problem and improve the performance.
+ - type: textarea
+ id: brief-description
+ attributes:
+ label: Brief Description
+ description: Please give a brief description about the performance issue here.
+ placeholder: Description
+ validations:
+ required: true
+ - type: textarea
+ id: device-and-context
+ attributes:
+ label: Device and Context
+ description: |
+ Please describe the device and context you used when you encounter the performance problem/regression.
+ placeholder: Device and Context
+ validations:
+ required: true
+ - type: textarea
+ id: benchmark
+ attributes:
+ label: Benchmark
+ description: |
+ We will appreciate it if you'd like to provide benchmark comparison of the performance issue.
+ placeholder: Benchmark
+ validations:
+ required: false
+ - type: textarea
+ id: alternatives
+ attributes:
+ label: Alternatives
+ description: |
+        Please provide some alternative information about the performance issue here, if any. For example, we'll appreciate it if you'd like to provide the code to reproduce the performance problem.
+ placeholder: Alternatives
+ validations:
+ required: false
+ - type: markdown
+ attributes:
+ value: |
+        Thanks for your contribution!
diff --git a/.github/ISSUE_TEMPLATE/question.yml b/.github/ISSUE_TEMPLATE/question.yml
new file mode 100644
index 000000000..ca38be340
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/question.yml
@@ -0,0 +1,30 @@
+name: Question
+description: Ask any question about Tensorflow.NET and discuss with community members.
+title: "[Question]: "
+labels: [Question]
+body:
+ - type: markdown
+ attributes:
+ value: |
+        Any question about Tensorflow.NET is welcome! This template will help us understand your question.
+ - type: textarea
+ id: description
+ attributes:
+ label: Description
+ description: Please describe your question here.
+ placeholder: Description
+ validations:
+ required: true
+ - type: textarea
+ id: alternatives
+ attributes:
+ label: Alternatives
+ description: |
+ Please provide some alternative information here, if any.
+ placeholder: Alternatives
+ validations:
+ required: false
+ - type: markdown
+ attributes:
+ value: |
+ We are always willing to answer your questions!
diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml
new file mode 100644
index 000000000..9fd34fc49
--- /dev/null
+++ b/.github/workflows/build_and_test.yml
@@ -0,0 +1,66 @@
+# This workflow will build a .NET project
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-net
+
+name: build_and_test
+
+on:
+ push:
+ branches: [ "master" ]
+ pull_request:
+ branches: [ "master" ]
+ types: ["opened", "reopened", "synchronize", "ready_for_review", "auto_merge_enabled"]
+
+jobs:
+ windows:
+
+ runs-on: windows-latest
+
+ steps:
+ - uses: actions/checkout@v3
+ - name: Setup .NET 6
+ uses: actions/setup-dotnet@v3
+ with:
+ dotnet-version: 6.0.x
+ - name: Restore dependencies
+ run: dotnet restore
+ - name: Build CPU version
+ run: dotnet build --no-restore
+ - name: Test CPU version
+ run: dotnet test --no-build --verbosity normal
+ - name: uninstall redist cpu for unit tests
+ run: dotnet remove tools/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist
+ - name: install redist gpu for unit tests
+ run: dotnet add tools/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist-Windows-GPU
+ - name: Restore dependencies
+ run: dotnet restore
+ - name: Build GPU version
+ run: dotnet build --no-restore
+# - name: Test GPU version
+# run: dotnet test --no-build --verbosity normal
+
+ linux:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+ - name: Setup .NET
+ uses: actions/setup-dotnet@v3
+ with:
+ dotnet-version: 6.0.x
+ - name: Restore dependencies
+ run: dotnet restore
+ - name: Build CPU version
+ run: dotnet build --no-restore
+ - name: Test CPU version
+ run: dotnet test --no-build --verbosity normal
+ - name: uninstall redist cpu for unit tests
+ run: dotnet remove tools/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist
+ - name: install redist gpu for unit tests
+ run: dotnet add tools/Tensorflow.UnitTest.RedistHolder package SciSharp.TensorFlow.Redist-Linux-GPU
+ - name: Restore dependencies
+ run: dotnet restore
+ - name: Build GPU version
+ run: dotnet build --no-restore
+# - name: Test GPU version
+# run: dotnet test --no-build --verbosity normal
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 000000000..02601764c
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,62 @@
+name: auto-release
+
+on:
+ workflow_run:
+ workflows: ["release-prepare"]
+ types:
+ - completed
+
+env:
+ MYGET_API_TOKEN: ${{ SECRETS.MYGET_API_KEY }}
+ GITHUB_TOKEN: ${{ SECRETS.RINNE_GITHUB_TOKEN }}
+
+jobs:
+ release_to_myget:
+ runs-on: windows-latest
+# needs: run-semantic-release
+
+ steps:
+ - uses: actions/checkout@v3
+ - name: Setup .NET 6.0.x SDK
+ uses: actions/setup-dotnet@v3
+ with:
+ dotnet-version: 6.0.x
+
+ - name: Check .NET info
+ run: dotnet --info
+
+ - name: Install dependencies
+ run: dotnet restore
+
+ - name: Build solution
+ run: dotnet build -c Release --no-restore
+
+ - name: Pack packages
+ run: |
+ git fetch --unshallow;
+ git config remote.origin.fetch "+refs/heads/*:refs/remotes/origin/*";
+ git fetch origin;
+ $LastTag = git describe --tags;
+ $DroppedTag = ($LastTag).TrimStart('v');
+ echo "Last tag is: $DroppedTag";
+ $Suffix = "-nightly"
+ $Version = "${DroppedTag}${Suffix}";
+ echo "Publishing version: $Version";
+ dotnet pack ./src/TensorFlowNET.Core/Tensorflow.Binding.csproj -c Release -o packages /p:PackageVersion=$Version /p:Version=$Version;
+ dotnet pack ./src/TensorFlowNET.Keras/Tensorflow.Keras.csproj -c Release -o packages /p:PackageVersion=$Version /p:Version=$Version;
+ dotnet pack ./src/TensorflowNET.Hub/Tensorflow.Hub.csproj -c Release -o packages /p:PackageVersion=$Version /p:Version=$Version;
+
+ if($LastExitCode -ne 0)
+ {
+          Write-Warning -Message "Pack packages warning, last exit code is ${LastExitCode}."
+ $LastExitCode = 0;
+ }
+
+ - name: Upload packages artifacts
+ uses: actions/upload-artifact@v4.0.0
+ with:
+ name: "drop-ci-packages"
+ path: './packages'
+
+ - name: Push TensorFlow.NET to myget.org
+ run: dotnet nuget push .\packages\TensorFlow*.nupkg --source https://www.myget.org/F/scisharp/api/v3/index.json -k ${{ secrets.MYGET_API_KEY }} --skip-duplicate
diff --git a/.github/workflows/release_prepare.yml b/.github/workflows/release_prepare.yml
new file mode 100644
index 000000000..b21c6665c
--- /dev/null
+++ b/.github/workflows/release_prepare.yml
@@ -0,0 +1,46 @@
+name: release-prepare
+
+on:
+ pull_request:
+ branches:
+ - master
+ types: [ closed ]
+
+env:
+ MYGET_API_TOKEN: ${{ SECRETS.MYGET_API_KEY }}
+ GITHUB_TOKEN: ${{ SECRETS.RINNE_GITHUB_TOKEN }}
+
+jobs:
+ build:
+ if: contains(github.event.pull_request.labels.*.name, 'auto-release')
+ runs-on: windows-latest
+
+ steps:
+ - uses: actions/checkout@v3
+ - name: Setup .NET 6.0.x SDK
+ uses: actions/setup-dotnet@v3
+ with:
+ dotnet-version: 6.0.x
+
+ - name: Check .NET info
+ run: dotnet --info
+
+ - name: Install dependencies
+ run: dotnet restore
+
+ - name: Build solution
+ run: dotnet build -c Release --no-restore
+
+# run-semantic-release:
+# runs-on: ubuntu-latest
+# needs: build
+
+# steps:
+# - name: Checkout
+# uses: actions/checkout@v2
+
+# - name: Run semantic-release
+# run: |
+# export PATH=$PATH:$(yarn global bin)
+# yarn global add semantic-release@17.4.3
+# semantic-release
\ No newline at end of file
diff --git a/.github/workflows/semantic.yml b/.github/workflows/semantic.yml
new file mode 100644
index 000000000..db8c06a3e
--- /dev/null
+++ b/.github/workflows/semantic.yml
@@ -0,0 +1,17 @@
+name: Semantic
+
+on:
+ pull_request:
+ branches: [ "master" ]
+
+jobs:
+ semantic-pull-request:
+ name: Semantic check
+ runs-on: windows-latest
+ steps:
+ - name: semantic-pull-request
+ uses: amannn/action-semantic-pull-request@v4
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ with:
+ validateSingleCommit: true
diff --git a/.gitignore b/.gitignore
index 1a6a75a22..231d8379a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -62,7 +62,6 @@ StyleCopReport.xml
*_p.c
*_i.h
*.ilk
-*.meta
*.obj
*.iobj
*.pch
@@ -328,9 +327,15 @@ ASALocalRun/
# MFractors (Xamarin productivity tool) working folder
.mfractor/
-/tensorflowlib/win7-x64/native/libtensorflow.dll
-/tensorflowlib/osx/native/libtensorflow_framework.dylib
-/tensorflowlib/osx/native/libtensorflow.dylib
-/tensorflowlib/linux/native/libtensorflow_framework.so
-/tensorflowlib/linux/native/libtensorflow.so
-/src/TensorFlowNET.Core/tensorflow.dll
+/docs/build
+src/TensorFlowNET.Native/bazel-*
+src/TensorFlowNET.Native/c_api.h
+/.vscode
+test/TensorFlowNET.Examples/mnist
+
+
+# training model resources
+.resources
+/redist
+*.xml
+*.xsd
diff --git a/CODEOWNERS b/CODEOWNERS
new file mode 100644
index 000000000..ee3236a46
--- /dev/null
+++ b/CODEOWNERS
@@ -0,0 +1,3 @@
+# You can find more information about CODEOWNERS here: https://help.github.com/en/articles/about-code-owners
+# These owners will be the default owners for everything in the repo.
+* @Oceania2018
\ No newline at end of file
diff --git a/Directory.Build.props b/Directory.Build.props
new file mode 100644
index 000000000..065690ec9
--- /dev/null
+++ b/Directory.Build.props
@@ -0,0 +1,17 @@
+
+
+
+
+
+ true
+ $(NoWarn),1573,1591,1712
+
+
+
diff --git a/Directory.Build.targets b/Directory.Build.targets
new file mode 100644
index 000000000..341027f3c
--- /dev/null
+++ b/Directory.Build.targets
@@ -0,0 +1,3 @@
+
+
+
diff --git a/README.md b/README.md
index a0004962e..75cad0aa7 100644
--- a/README.md
+++ b/README.md
@@ -1,47 +1,261 @@
-# TensorFlow.NET
-TensorFlow.NET provides .NET Standard binding for [TensorFlow](https://www.tensorflow.org/).
+
+**TensorFlow.NET** (TF.NET) provides a .NET Standard binding for [TensorFlow](https://www.tensorflow.org/). It aims to implement the complete Tensorflow API in C# which allows .NET developers to develop, train and deploy Machine Learning models with the cross-platform .NET Standard framework. TensorFlow.NET has built-in Keras high-level interface and is released as an independent package [TensorFlow.Keras](https://www.nuget.org/packages/TensorFlow.Keras/).
+
+[](https://discord.gg/qRVm82fKTS)
+[](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=sN9VVMwbWjs5L0ATpizKKxOcZdEPMrp8&authKey=RLDw41bLTrEyEgZZi%2FzT4pYk%2BwmEFgFcrhs8ZbkiVY7a4JFckzJefaYNW6Lk4yPX&noverify=0&group_code=985366726)
[](https://gitter.im/sci-sharp/community)
-
+[](https://github.com/SciSharp/TensorFlow.NET/actions/workflows/build_and_test.yml)
+[](https://tensorflownet.readthedocs.io/en/latest/?badge=latest)
+[](https://www.nuget.org/packages/TensorFlow.NET)
+[](https://www.nuget.org/packages/TensorFlow.Keras)
+[](https://www.myget.org/feed/scisharp/package/nuget/Tensorflow.NET)
+[](https://996.icu/#/en_US)
+[](https://mybinder.org/v2/gh/javiercp/BinderTF.NET/master?urlpath=lab)
+
+English | [中文](docs/README-CN.md)
+
+> [!IMPORTANT]
+> We're happy that our work on tensorflow.net has attracted many users. However, at this time, none of the main maintainers of this repo is available for new features and bug fix. We won't refuse PRs and will help to review them.
+>
+> If you would like to be a contributor or maintainer of tensorflow.net, we'd like to help you to start up.
+>
+> We feel sorry for that and we'll resume the maintaining for this project once one of us has bandwidth for it.
+>
+
+*master branch and v0.100.x is corresponding to tensorflow v2.10, v0.6x branch is from tensorflow v2.6, v0.15-tensorflow1.15 is from tensorflow1.15. Please add `https://www.myget.org/F/scisharp/api/v3/index.json` to nuget source to use nightly release.*
-TensorFlow.NET is a member project of SciSharp stack.

-### How to use
-Download the pre-compiled dll [here](tensorflowlib) and place it in the bin folder.
+## Why Tensorflow.NET ?
-Import tensorflow.net.
-```cs
-using Tensorflow;
+`SciSharp STACK`'s mission is to bring popular data science technology into the .NET world and to provide .NET developers with a powerful Machine Learning tool set without reinventing the wheel. Since the APIs are kept as similar as possible you can immediately adapt any existing TensorFlow code in C# or F# with a zero learning curve. Take a look at a comparison picture and see how comfortably a TensorFlow/Python script translates into a C# program with TensorFlow.NET.
+
+
+
+SciSharp's philosophy allows a large number of machine learning code written in Python to be quickly migrated to .NET, enabling .NET developers to use cutting edge machine learning models and access a vast number of TensorFlow resources which would not be possible without this project.
+
+In comparison to other projects, like for instance [TensorFlowSharp](https://www.nuget.org/packages/TensorFlowSharp/) which only provide TensorFlow's low-level C++ API and can only run models that were built using Python, Tensorflow.NET makes it possible to build the pipeline of training and inference with pure C# and F#. Besides, Tensorflow.NET provides binding of Tensorflow.Keras to make it easy to transfer your code from python to .NET.
+
+[ML.NET](https://github.com/dotnet/machinelearning) also take Tensorflow.NET as one of the backends to train and infer your model, which provides better integration with .NET.
+
+
+## Documentation
+
+Introduction and simple examples: [Tensorflow.NET Documents](https://scisharp.github.io/tensorflow-net-docs)
+
+Detailed documentation: [The Definitive Guide to Tensorflow.NET](https://tensorflownet.readthedocs.io/en/latest/FrontCover.html)
+
+Examples: [TensorFlow.NET Examples](https://github.com/SciSharp/TensorFlow.NET-Examples)
+
+Troubleshooting for running examples or installation: [Tensorflow.NET FAQ](tensorflowlib/README.md)
+
+## Usage
+
+### Installation
+
+You can search the package name in NuGet Manager, or use the commands below in package manager console.
+
+The installation contains two parts, the first is the main body:
+
+```sh
+### Install Tensorflow.NET
+PM> Install-Package TensorFlow.NET
+
+### Install Tensorflow.Keras
+PM> Install-Package TensorFlow.Keras
```
-Add two constants.
-```cs
-// Create a Constant op
-var a = tf.constant(4.0f);
-var b = tf.constant(5.0f);
-var c = tf.add(a, b);
+The second part is the computing support part. Only one of the following packages is needed, depending on your device and system.
-using (var sess = tf.Session())
-{
- var o = sess.run(c);
-}
```
+### CPU version for Windows and Linux
+PM> Install-Package SciSharp.TensorFlow.Redist
+
+### CPU version for MacOS
+PM> Install-Package SciSharp.TensorFlow.Redist-OSX
+
+### GPU version for Windows (CUDA and cuDNN are required)
+PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU
+
+### GPU version for Linux (CUDA and cuDNN are required)
+PM> Install-Package SciSharp.TensorFlow.Redist-Linux-GPU
+```
+
+
+Two simple examples are given here to introduce the basic usage of Tensorflow.NET. As you can see, it's easy to write C# code just like that in Python.
+
+### Example - Linear Regression in `Eager` mode
+
+```csharp
+using static Tensorflow.Binding;
+using static Tensorflow.KerasApi;
+using Tensorflow;
+using Tensorflow.NumPy;
+
+// Parameters
+var training_steps = 1000;
+var learning_rate = 0.01f;
+var display_step = 100;
+
+// Sample data
+var X = np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
+ 7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f);
+var Y = np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
+ 2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f);
+var n_samples = X.shape[0];
-Feed placeholder.
-```cs
-// Create a placeholder op
-var a = tf.placeholder(tf.float32);
-var b = tf.placeholder(tf.float32);
-var c = tf.add(a, b);
+// We can set a fixed init value in order to demo
+var W = tf.Variable(-0.06f, name: "weight");
+var b = tf.Variable(-0.73f, name: "bias");
+var optimizer = keras.optimizers.SGD(learning_rate);
-using(var sess = tf.Session())
+// Run training for the given number of steps.
+foreach (var step in range(1, training_steps + 1))
{
- var feed_dict = new Dictionary();
- feed_dict.Add(a, 3.0f);
- feed_dict.Add(b, 2.0f);
+ // Run the optimization to update W and b values.
+ // Wrap computation inside a GradientTape for automatic differentiation.
+ using var g = tf.GradientTape();
+ // Linear regression (Wx + b).
+ var pred = W * X + b;
+ // Mean square error.
+ var loss = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples);
+ // should stop recording
+ // Compute gradients.
+ var gradients = g.gradient(loss, (W, b));
- var o = sess.run(c, feed_dict);
+ // Update W and b following gradients.
+ optimizer.apply_gradients(zip(gradients, (W, b)));
+
+ if (step % display_step == 0)
+ {
+ pred = W * X + b;
+ loss = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples);
+ print($"step: {step}, loss: {loss.numpy()}, W: {W.numpy()}, b: {b.numpy()}");
+ }
}
```
+
+Run this example in [Jupyter Notebook](https://github.com/SciSharp/SciSharpCube).
+
+### Example - Toy version of `ResNet` in `Keras` functional API
+
+```csharp
+using static Tensorflow.Binding;
+using static Tensorflow.KerasApi;
+using Tensorflow;
+using Tensorflow.NumPy;
+
+var layers = keras.layers;
+// input layer
+var inputs = keras.Input(shape: (32, 32, 3), name: "img");
+// convolutional layer
+var x = layers.Conv2D(32, 3, activation: "relu").Apply(inputs);
+x = layers.Conv2D(64, 3, activation: "relu").Apply(x);
+var block_1_output = layers.MaxPooling2D(3).Apply(x);
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(block_1_output);
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(x);
+var block_2_output = layers.Add().Apply(new Tensors(x, block_1_output));
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(block_2_output);
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(x);
+var block_3_output = layers.Add().Apply(new Tensors(x, block_2_output));
+x = layers.Conv2D(64, 3, activation: "relu").Apply(block_3_output);
+x = layers.GlobalAveragePooling2D().Apply(x);
+x = layers.Dense(256, activation: "relu").Apply(x);
+x = layers.Dropout(0.5f).Apply(x);
+// output layer
+var outputs = layers.Dense(10).Apply(x);
+// build keras model
+var model = keras.Model(inputs, outputs, name: "toy_resnet");
+model.summary();
+// compile keras model in tensorflow static graph
+model.compile(optimizer: keras.optimizers.RMSprop(1e-3f),
+ loss: keras.losses.SparseCategoricalCrossentropy(from_logits: true),
+ metrics: new[] { "acc" });
+// prepare dataset
+var ((x_train, y_train), (x_test, y_test)) = keras.datasets.cifar10.load_data();
+// normalize the input
+x_train = x_train / 255.0f;
+// training
+model.fit(x_train[new Slice(0, 2000)], y_train[new Slice(0, 2000)],
+ batch_size: 64,
+ epochs: 10,
+ validation_split: 0.2f);
+// save the model
+model.save("./toy_resnet_model");
+```
+
+The F# example for linear regression is available [here](docs/Example-fsharp.md).
+
+More advanced examples can be found in [TensorFlow.NET Examples](https://github.com/SciSharp/TensorFlow.NET-Examples).
+
+## Version Relationships
+
+| TensorFlow.NET Versions | tensorflow 1.14, cuda 10.0 | tensorflow 1.15, cuda 10.0 | tensorflow 2.3, cuda 10.1 | tensorflow 2.4, cuda 11 | tensorflow 2.7, cuda 11 |tensorflow 2.10, cuda 11 |
+| -------------------------- | ------------- | -------------- | ------------- | ------------- | ------------ | ------------ |
+| tf.net 0.10x, tf.keras 0.10 | | | | | | x |
+| tf.net 0.7x, tf.keras 0.7 | | | | | x | |
+| tf.net 0.4x, tf.keras 0.5 | | | | x | | |
+| tf.net 0.3x, tf.keras 0.4 | | | x | | | |
+| tf.net 0.2x | | x | x | | | |
+| tf.net 0.15 | x | x | | | | |
+| tf.net 0.14 | x | | | | | |
+
+
+```
+tf.net 0.4x -> tf native 2.4
+tf.net 0.6x -> tf native 2.6
+tf.net 0.7x -> tf native 2.7
+tf.net 0.10x -> tf native 2.10
+...
+```
+
+## Contribution:
+
+Feel like contributing to one of the hottest projects in the Machine Learning field? Want to know how Tensorflow magically creates the computational graph?
+
+We appreciate every contribution however small! There are tasks for novices to experts alike, if everyone tackles only a small task the sum of contributions will be huge.
+
+You can:
+- Star Tensorflow.NET or share it with others
+- Tell us about the missing APIs compared to Tensorflow
+- Port Tensorflow unit tests from Python to C# or F#
+- Port Tensorflow examples to C# or F# and raise issues if you come across missing parts of the API or a BUG
+- Debug one of the unit tests that is marked as Ignored to get it to work
+- Debug one of the not yet working examples and get it to work
+- Help us to complete the documentation.
+
+
+#### How to debug unit tests:
+
+The best way to find out why a unit test is failing is to single step it in C# or F# and its corresponding Python at the same time to see where the flow of execution digresses or where variables exhibit different values. Good Python IDEs like PyCharm let you single step into the tensorflow library code.
+
+#### Git Knowhow for Contributors
+
+Add SciSharp/TensorFlow.NET as upstream to your local repo ...
+```git
+git remote add upstream git@github.com:SciSharp/TensorFlow.NET.git
+```
+
+Please make sure you keep your fork up to date by regularly pulling from upstream.
+```git
+git pull upstream master
+```
+
+### Support
+Buy our book to help make this open source project sustainable: [TensorFlow.NET实战](https://item.jd.com/13441549.html)
+
+
+
+
+
+
+### Contact
+
+Join our chat on [Discord](https://discord.gg/qRVm82fKTS) or [Gitter](https://gitter.im/sci-sharp/community).
+
+Follow us on [Twitter](https://twitter.com/ScisharpStack), [Facebook](https://www.facebook.com/scisharp.stack.9), [Medium](https://medium.com/scisharp), [LinkedIn](https://www.linkedin.com/company/scisharp-stack/).
+
+TensorFlow.NET is a part of [SciSharp STACK](https://scisharp.github.io/SciSharp/)
+
+
diff --git a/TensorFlow.NET.sln b/TensorFlow.NET.sln
index 8936dd3d9..e0c273568 100644
--- a/TensorFlow.NET.sln
+++ b/TensorFlow.NET.sln
@@ -1,42 +1,390 @@
Microsoft Visual Studio Solution File, Format Version 12.00
-# Visual Studio 15
-VisualStudioVersion = 15.0.28307.136
+# Visual Studio Version 17
+VisualStudioVersion = 17.4.33213.308
MinimumVisualStudioVersion = 10.0.40219.1
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.UnitTest", "test\TensorFlowNET.UnitTest\TensorFlowNET.UnitTest.csproj", "{029A8CF1-CF95-4DCB-98AA-9D3D96A83B3E}"
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Binding", "src\TensorFlowNET.Core\Tensorflow.Binding.csproj", "{FD682AC0-7B2D-45D3-8B0D-C6D678B04144}"
EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Core", "src\TensorFlowNET.Core\TensorFlowNET.Core.csproj", "{1B1BC950-2CB0-48E2-B4CD-8172AFF67A10}"
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Binding.UnitTest", "test\TensorFlowNET.UnitTest\Tensorflow.Binding.UnitTest.csproj", "{23C28035-2FCE-41F3-9A12-E73CE8A5AE32}"
EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Examples", "test\TensorFlowNET.Examples\TensorFlowNET.Examples.csproj", "{1FE60088-157C-4140-91AB-E96B915E4BAE}"
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras", "src\TensorFlowNET.Keras\Tensorflow.Keras.csproj", "{49D71826-C03D-4FA7-9BAC-22C1327E65CF}"
EndProject
-Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "NumSharp.Core", "..\NumSharp\src\NumSharp.Core\NumSharp.Core.csproj", "{6ACED8FF-F08E-40E6-A75D-D01BAAA41072}"
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Text", "src\TensorFlowNET.Text\Tensorflow.Text.csproj", "{1AB8108D-4FFE-4A16-88E7-328EAF686370}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Recommenders", "src\TensorFlowNET.Recommenders\Tensorflow.Recommenders.csproj", "{F17AAECB-960A-4E18-A270-BAD776F0E55B}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Native.UnitTest", "test\TensorFlowNET.Native.UnitTest\Tensorflow.Native.UnitTest.csproj", "{84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Keras.UnitTest", "test\TensorFlowNET.Keras.UnitTest\Tensorflow.Keras.UnitTest.csproj", "{79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Graph.UnitTest", "test\TensorFlowNET.Graph.UnitTest\TensorFlowNET.Graph.UnitTest.csproj", "{3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Hub", "src\TensorflowNET.Hub\Tensorflow.Hub.csproj", "{9738D16A-CFA0-405C-A7DF-D3D203B0CB18}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Hub.Unittest", "test\TensorflowNET.Hub.Unittest\Tensorflow.Hub.Unittest.csproj", "{7DEA8760-E401-4872-81F3-405F185A13A0}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "src", "src", "{01A1787F-A9BE-4221-84E8-6360DD010AB6}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "test", "test", "{1B0918B9-65AD-4F34-A287-AF4597B27DBD}"
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "tools", "tools", "{E1A5D2B7-10AF-4876-85C0-7714EF274214}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.CodeGen", "tools\Tensorflow.CodeGen\Tensorflow.CodeGen.csproj", "{3D92142F-EEDB-469B-B03C-4E38728BFE4C}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Redist.NativeLibrarySplitter", "tools\Tensorflow.Redist.NativeLibrarySplitter\Tensorflow.Redist.NativeLibrarySplitter.csproj", "{AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.UnitTest.RedistHolder", "tools\Tensorflow.UnitTest.RedistHolder\Tensorflow.UnitTest.RedistHolder.csproj", "{D24FCAA5-548C-4251-B226-A1B6535D0845}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Benchmark", "tools\TensorFlowNET.Benchmarks\Tensorflow.Benchmark.csproj", "{C23563DB-FE21-48E7-A411-87A109E4A899}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Tensorflow.Console", "tools\TensorFlowNET.Console\Tensorflow.Console.csproj", "{1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlow.Kernel.UnitTest", "test\TensorFlow.Kernel.UnitTest\TensorFlow.Kernel.UnitTest.csproj", "{654A027D-1364-4729-880B-144DFE1FF5BB}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Tensorflow.UnitTest", "test\Tensorflow.UnitTest\Tensorflow.UnitTest.csproj", "{A73DF5A6-866E-4AED-9017-AA2EE86368C4}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
+ Debug|x64 = Debug|x64
+ Debug|x86 = Debug|x86
+ GPU|Any CPU = GPU|Any CPU
+ GPU|x64 = GPU|x64
+ GPU|x86 = GPU|x86
Release|Any CPU = Release|Any CPU
+ Release|x64 = Release|x64
+ Release|x86 = Release|x86
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
- {029A8CF1-CF95-4DCB-98AA-9D3D96A83B3E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {029A8CF1-CF95-4DCB-98AA-9D3D96A83B3E}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {029A8CF1-CF95-4DCB-98AA-9D3D96A83B3E}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {029A8CF1-CF95-4DCB-98AA-9D3D96A83B3E}.Release|Any CPU.Build.0 = Release|Any CPU
- {1B1BC950-2CB0-48E2-B4CD-8172AFF67A10}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {1B1BC950-2CB0-48E2-B4CD-8172AFF67A10}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {1B1BC950-2CB0-48E2-B4CD-8172AFF67A10}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {1B1BC950-2CB0-48E2-B4CD-8172AFF67A10}.Release|Any CPU.Build.0 = Release|Any CPU
- {1FE60088-157C-4140-91AB-E96B915E4BAE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {1FE60088-157C-4140-91AB-E96B915E4BAE}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {1FE60088-157C-4140-91AB-E96B915E4BAE}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {1FE60088-157C-4140-91AB-E96B915E4BAE}.Release|Any CPU.Build.0 = Release|Any CPU
- {6ACED8FF-F08E-40E6-A75D-D01BAAA41072}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
- {6ACED8FF-F08E-40E6-A75D-D01BAAA41072}.Debug|Any CPU.Build.0 = Debug|Any CPU
- {6ACED8FF-F08E-40E6-A75D-D01BAAA41072}.Release|Any CPU.ActiveCfg = Release|Any CPU
- {6ACED8FF-F08E-40E6-A75D-D01BAAA41072}.Release|Any CPU.Build.0 = Release|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.ActiveCfg = Debug|x64
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.Build.0 = Debug|x64
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x86.Build.0 = Debug|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.GPU|Any CPU.ActiveCfg = GPU|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.GPU|Any CPU.Build.0 = GPU|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.GPU|x64.ActiveCfg = GPU|x64
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.GPU|x64.Build.0 = GPU|x64
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.GPU|x86.ActiveCfg = GPU|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.GPU|x86.Build.0 = GPU|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.Build.0 = Release|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.ActiveCfg = Release|x64
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.Build.0 = Release|x64
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x86.ActiveCfg = Release|Any CPU
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x86.Build.0 = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.ActiveCfg = Debug|x64
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.Build.0 = Debug|x64
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x86.Build.0 = Debug|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.GPU|Any CPU.ActiveCfg = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.GPU|Any CPU.Build.0 = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.GPU|x64.ActiveCfg = Release|x64
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.GPU|x64.Build.0 = Release|x64
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.GPU|x86.ActiveCfg = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.GPU|x86.Build.0 = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.Build.0 = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.ActiveCfg = Release|x64
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.Build.0 = Release|x64
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x86.ActiveCfg = Release|Any CPU
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x86.Build.0 = Release|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|x64.ActiveCfg = Debug|x64
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|x64.Build.0 = Debug|x64
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|x86.Build.0 = Debug|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.GPU|Any CPU.ActiveCfg = GPU|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.GPU|Any CPU.Build.0 = GPU|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.GPU|x64.ActiveCfg = GPU|x64
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.GPU|x64.Build.0 = GPU|x64
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.GPU|x86.ActiveCfg = GPU|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.GPU|x86.Build.0 = GPU|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|Any CPU.Build.0 = Release|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|x64.ActiveCfg = Release|x64
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|x64.Build.0 = Release|x64
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|x86.ActiveCfg = Release|Any CPU
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|x86.Build.0 = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|x64.ActiveCfg = Debug|x64
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|x64.Build.0 = Debug|x64
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|x86.Build.0 = Debug|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.GPU|Any CPU.ActiveCfg = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.GPU|Any CPU.Build.0 = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.GPU|x64.ActiveCfg = Release|x64
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.GPU|x64.Build.0 = Release|x64
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.GPU|x86.ActiveCfg = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.GPU|x86.Build.0 = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|Any CPU.Build.0 = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|x64.ActiveCfg = Release|x64
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|x64.Build.0 = Release|x64
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|x86.ActiveCfg = Release|Any CPU
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|x86.Build.0 = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|x64.ActiveCfg = Debug|x64
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|x64.Build.0 = Debug|x64
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|x86.Build.0 = Debug|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.GPU|Any CPU.ActiveCfg = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.GPU|Any CPU.Build.0 = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.GPU|x64.ActiveCfg = Release|x64
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.GPU|x64.Build.0 = Release|x64
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.GPU|x86.ActiveCfg = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.GPU|x86.Build.0 = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|Any CPU.Build.0 = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|x64.ActiveCfg = Release|x64
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|x64.Build.0 = Release|x64
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|x86.ActiveCfg = Release|Any CPU
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|x86.Build.0 = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|x64.ActiveCfg = Debug|x64
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|x64.Build.0 = Debug|x64
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|x86.Build.0 = Debug|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.GPU|Any CPU.ActiveCfg = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.GPU|Any CPU.Build.0 = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.GPU|x64.ActiveCfg = Release|x64
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.GPU|x64.Build.0 = Release|x64
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.GPU|x86.ActiveCfg = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.GPU|x86.Build.0 = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|Any CPU.Build.0 = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|x64.ActiveCfg = Release|x64
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|x64.Build.0 = Release|x64
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|x86.ActiveCfg = Release|Any CPU
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|x86.Build.0 = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|x64.ActiveCfg = Debug|x64
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|x64.Build.0 = Debug|x64
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|x86.Build.0 = Debug|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.GPU|Any CPU.ActiveCfg = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.GPU|Any CPU.Build.0 = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.GPU|x64.ActiveCfg = Release|x64
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.GPU|x64.Build.0 = Release|x64
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.GPU|x86.ActiveCfg = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.GPU|x86.Build.0 = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|Any CPU.Build.0 = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|x64.ActiveCfg = Release|x64
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|x64.Build.0 = Release|x64
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|x86.ActiveCfg = Release|Any CPU
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|x86.Build.0 = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Debug|x64.ActiveCfg = Debug|x64
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Debug|x64.Build.0 = Debug|x64
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Debug|x86.Build.0 = Debug|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.GPU|Any CPU.ActiveCfg = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.GPU|Any CPU.Build.0 = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.GPU|x64.ActiveCfg = Release|x64
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.GPU|x64.Build.0 = Release|x64
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.GPU|x86.ActiveCfg = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.GPU|x86.Build.0 = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Release|Any CPU.Build.0 = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Release|x64.ActiveCfg = Release|x64
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Release|x64.Build.0 = Release|x64
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Release|x86.ActiveCfg = Release|Any CPU
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3}.Release|x86.Build.0 = Release|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Debug|x64.Build.0 = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Debug|x86.Build.0 = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.GPU|x64.Build.0 = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.GPU|x86.Build.0 = Debug|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Release|Any CPU.Build.0 = Release|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Release|x64.ActiveCfg = Release|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Release|x64.Build.0 = Release|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Release|x86.ActiveCfg = Release|Any CPU
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18}.Release|x86.Build.0 = Release|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Debug|x64.Build.0 = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Debug|x86.Build.0 = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.GPU|x64.Build.0 = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.GPU|x86.Build.0 = Debug|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|Any CPU.Build.0 = Release|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|x64.ActiveCfg = Release|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|x64.Build.0 = Release|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|x86.ActiveCfg = Release|Any CPU
+ {7DEA8760-E401-4872-81F3-405F185A13A0}.Release|x86.Build.0 = Release|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|x64.Build.0 = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Debug|x86.Build.0 = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|x64.Build.0 = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.GPU|x86.Build.0 = Debug|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|Any CPU.Build.0 = Release|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|x64.ActiveCfg = Release|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|x64.Build.0 = Release|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|x86.ActiveCfg = Release|Any CPU
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C}.Release|x86.Build.0 = Release|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|x64.Build.0 = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Debug|x86.Build.0 = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|x64.Build.0 = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.GPU|x86.Build.0 = Debug|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|Any CPU.Build.0 = Release|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|x64.ActiveCfg = Release|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|x64.Build.0 = Release|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|x86.ActiveCfg = Release|Any CPU
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C}.Release|x86.Build.0 = Release|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|x64.Build.0 = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Debug|x86.Build.0 = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|x64.Build.0 = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.GPU|x86.Build.0 = Debug|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|Any CPU.Build.0 = Release|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|x64.ActiveCfg = Release|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|x64.Build.0 = Release|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|x86.ActiveCfg = Release|Any CPU
+ {D24FCAA5-548C-4251-B226-A1B6535D0845}.Release|x86.Build.0 = Release|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|x64.ActiveCfg = Debug|x64
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|x64.Build.0 = Debug|x64
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Debug|x86.Build.0 = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|x64.ActiveCfg = Debug|x64
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|x64.Build.0 = Debug|x64
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.GPU|x86.Build.0 = Debug|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|Any CPU.Build.0 = Release|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|x64.ActiveCfg = Release|x64
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|x64.Build.0 = Release|x64
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|x86.ActiveCfg = Release|Any CPU
+ {C23563DB-FE21-48E7-A411-87A109E4A899}.Release|x86.Build.0 = Release|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|x64.ActiveCfg = Debug|x64
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|x64.Build.0 = Debug|x64
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Debug|x86.Build.0 = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|x64.ActiveCfg = Debug|x64
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|x64.Build.0 = Debug|x64
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.GPU|x86.Build.0 = Debug|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|Any CPU.Build.0 = Release|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x64.ActiveCfg = Release|x64
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x64.Build.0 = Release|x64
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x86.ActiveCfg = Release|Any CPU
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0}.Release|x86.Build.0 = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x64.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Debug|x86.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x64.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.GPU|x86.Build.0 = Debug|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|Any CPU.Build.0 = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x64.ActiveCfg = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x64.Build.0 = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x86.ActiveCfg = Release|Any CPU
+ {654A027D-1364-4729-880B-144DFE1FF5BB}.Release|x86.Build.0 = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x64.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x64.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x86.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Debug|x86.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|Any CPU.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|Any CPU.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x64.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x64.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x86.ActiveCfg = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.GPU|x86.Build.0 = Debug|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|Any CPU.Build.0 = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x64.ActiveCfg = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x64.Build.0 = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x86.ActiveCfg = Release|Any CPU
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4}.Release|x86.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
+ GlobalSection(NestedProjects) = preSolution
+ {FD682AC0-7B2D-45D3-8B0D-C6D678B04144} = {01A1787F-A9BE-4221-84E8-6360DD010AB6}
+ {23C28035-2FCE-41F3-9A12-E73CE8A5AE32} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ {49D71826-C03D-4FA7-9BAC-22C1327E65CF} = {01A1787F-A9BE-4221-84E8-6360DD010AB6}
+ {1AB8108D-4FFE-4A16-88E7-328EAF686370} = {01A1787F-A9BE-4221-84E8-6360DD010AB6}
+ {F17AAECB-960A-4E18-A270-BAD776F0E55B} = {01A1787F-A9BE-4221-84E8-6360DD010AB6}
+ {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ {3F5388FF-FBB4-462B-8F6F-829FFBAEB8A3} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ {9738D16A-CFA0-405C-A7DF-D3D203B0CB18} = {01A1787F-A9BE-4221-84E8-6360DD010AB6}
+ {7DEA8760-E401-4872-81F3-405F185A13A0} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ {3D92142F-EEDB-469B-B03C-4E38728BFE4C} = {E1A5D2B7-10AF-4876-85C0-7714EF274214}
+ {AB131FA7-B7C3-4ABF-ABDE-E059C72A613C} = {E1A5D2B7-10AF-4876-85C0-7714EF274214}
+ {D24FCAA5-548C-4251-B226-A1B6535D0845} = {E1A5D2B7-10AF-4876-85C0-7714EF274214}
+ {C23563DB-FE21-48E7-A411-87A109E4A899} = {E1A5D2B7-10AF-4876-85C0-7714EF274214}
+ {1DC32255-BA1F-4D6D-A9C9-5BD5ED71CAA0} = {E1A5D2B7-10AF-4876-85C0-7714EF274214}
+ {654A027D-1364-4729-880B-144DFE1FF5BB} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ {A73DF5A6-866E-4AED-9017-AA2EE86368C4} = {1B0918B9-65AD-4F34-A287-AF4597B27DBD}
+ EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {2DEAD3CC-486B-4918-A607-50B0DE7B114A}
EndGlobalSection
diff --git a/TensorFlow.NET.sln.DotSettings b/TensorFlow.NET.sln.DotSettings
new file mode 100644
index 000000000..aba8725cc
--- /dev/null
+++ b/TensorFlow.NET.sln.DotSettings
@@ -0,0 +1,2 @@
+
+ True
\ No newline at end of file
diff --git a/data/dbpedia_subset.zip b/data/dbpedia_subset.zip
new file mode 100644
index 000000000..120ac8a10
Binary files /dev/null and b/data/dbpedia_subset.zip differ
diff --git a/data/imdb.zip b/data/imdb.zip
new file mode 100644
index 000000000..f38c48402
Binary files /dev/null and b/data/imdb.zip differ
diff --git a/data/img001.bmp b/data/img001.bmp
new file mode 100644
index 000000000..d149d76f1
Binary files /dev/null and b/data/img001.bmp differ
diff --git a/data/linear_regression.zip b/data/linear_regression.zip
new file mode 100644
index 000000000..50415d840
Binary files /dev/null and b/data/linear_regression.zip differ
diff --git a/data/lstm_crf_ner.zip b/data/lstm_crf_ner.zip
new file mode 100644
index 000000000..9e47ca934
Binary files /dev/null and b/data/lstm_crf_ner.zip differ
diff --git a/data/nb_example.npy b/data/nb_example.npy
new file mode 100644
index 000000000..4547812ca
Binary files /dev/null and b/data/nb_example.npy differ
diff --git a/data/shasta-daisy.jpg b/data/shasta-daisy.jpg
new file mode 100644
index 000000000..9a0a46eb0
Binary files /dev/null and b/data/shasta-daisy.jpg differ
diff --git a/data/text8.zip b/data/text8.zip
new file mode 100644
index 000000000..436e05b2d
Binary files /dev/null and b/data/text8.zip differ
diff --git a/data/tfhub_modules.zip b/data/tfhub_modules.zip
new file mode 100644
index 000000000..a61ba9c30
Binary files /dev/null and b/data/tfhub_modules.zip differ
diff --git a/docs/Example-fsharp.md b/docs/Example-fsharp.md
new file mode 100644
index 000000000..578543454
--- /dev/null
+++ b/docs/Example-fsharp.md
@@ -0,0 +1,55 @@
+Linear Regression in `Eager` mode:
+
+```fsharp
+#r "nuget: TensorFlow.Net"
+#r "nuget: TensorFlow.Keras"
+#r "nuget: SciSharp.TensorFlow.Redist"
+
+open Tensorflow
+open Tensorflow.NumPy
+open type Tensorflow.Binding
+open type Tensorflow.KerasApi
+
+let tf = New()
+tf.enable_eager_execution()
+
+// Parameters
+let training_steps = 1000
+let learning_rate = 0.01f
+let display_step = 100
+
+// Sample data
+let train_X =
+ np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
+ 7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f)
+let train_Y =
+ np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
+ 2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f)
+let n_samples = train_X.shape.[0]
+
+// We can set a fixed init value in order to demo
+let W = tf.Variable(-0.06f,name = "weight")
+let b = tf.Variable(-0.73f, name = "bias")
+let optimizer = keras.optimizers.SGD(learning_rate)
+
+// Run training for the given number of steps.
+for step = 1 to training_steps do
+ // Run the optimization to update W and b values.
+ // Wrap computation inside a GradientTape for automatic differentiation.
+ use g = tf.GradientTape()
+ // Linear regression (Wx + b).
+ let pred = W * train_X + b
+ // Mean square error.
+ let loss = tf.reduce_sum(tf.pow(pred - train_Y,2)) / (2 * n_samples)
+ // should stop recording
+ // compute gradients
+ let gradients = g.gradient(loss,struct (W,b))
+
+ // Update W and b following gradients.
+ optimizer.apply_gradients(zip(gradients, struct (W,b)))
+
+ if (step % display_step) = 0 then
+ let pred = W * train_X + b
+ let loss = tf.reduce_sum(tf.pow(pred-train_Y,2)) / (2 * n_samples)
+ printfn $"step: {step}, loss: {loss.numpy()}, W: {W.numpy()}, b: {b.numpy()}"
+```
\ No newline at end of file
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 000000000..69fe55ecf
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,19 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
diff --git a/docs/README-CN.md b/docs/README-CN.md
new file mode 100644
index 000000000..9776b0fb8
--- /dev/null
+++ b/docs/README-CN.md
@@ -0,0 +1,228 @@
+
+
+**Tensorflow.NET**是AI框架[TensorFlow](https://www.tensorflow.org/)在.NET平台上的实现,支持C#和F#,可以用来搭建深度学习模型并进行训练和推理,并内置了Numpy API,可以用来进行其它科学计算。
+
+Tensorflow.NET并非对于Python的简单封装,而是基于C API的pure C#实现,因此使用时无需额外的环境,可以很方便地用NuGet直接安装使用。并且dotnet团队提供的[ML.NET](https://github.com/dotnet/machinelearning)也依赖于Tensorflow.NET,支持调用Tensorflow.NET进行训练和推理,可以很方便地融入.NET生态。
+
+与tensorflow相同,Tensorflow.NET也内置了Keras这一高级API,只要在安装Tensorflow.NET的同时安装Tensorflow.Keras就可以使用,Keras支持以模块化的方式调用模型,给模型的搭建提供了极大的便利。
+
+[](https://gitter.im/sci-sharp/community)
+[](https://ci.appveyor.com/project/Haiping-Chen/tensorflow-net)
+[](https://www.nuget.org/packages/TensorFlow.NET)
+[](https://tensorflownet.readthedocs.io/en/latest/?badge=latest)
+[](https://996.icu/#/en_US)
+[](https://mybinder.org/v2/gh/javiercp/BinderTF.NET/master?urlpath=lab)
+
+中文 | [English](https://github.com/SciSharp/TensorFlow.NET#readme)
+
+*当前主分支与Tensorflow2.10版本相对应,支持Eager Mode,同时也支持v1的静态图。*
+
+
+
+
+## Why Tensorflow.NET?
+
+`SciSharp STACK`开源社区的目标是构建.NET平台下易用的科学计算库,而Tensorflow.NET就是其中最具代表性的仓库之一。在深度学习领域Python是主流,无论是初学者还是资深开发者,模型的搭建和训练都常常使用Python写就的AI框架,比如tensorflow。但在实际应用深度学习模型的时候,又可能希望用到.NET生态,亦或只是因为.NET是自己最熟悉的领域,这时候Tensorflow.NET就有显著的优点,因为它不仅可以和.NET生态很好地贴合,其API还使得开发者很容易将Python代码迁移过来。下面的对比就是很好的例子,Python代码和C#代码有着高度相似的API,这会使得迁移的时候无需做过多修改。
+
+
+
+除了高度相似的API外,Tensorflow.NET与tensorflow也已经打通数据通道,tensorflow训练并保存的模型可以在Tensorflow.NET中直接读取并继续训练或推理,反之Tensorflow.NET保存的模型也可以在tensorflow中读取,这大大方便了模型的训练和部署。
+
+与其它类似的库比如[TensorFlowSharp](https://www.nuget.org/packages/TensorFlowSharp/)相比,Tensorflow.NET的实现更加完全,提供了更多的高级API,使用起来更为方便,更新也更加迅速。
+
+
+## 文档
+
+基本介绍与简单用例:[Tensorflow.NET Documents](https://scisharp.github.io/tensorflow-net-docs)
+
+详细文档:[The Definitive Guide to Tensorflow.NET](https://tensorflownet.readthedocs.io/en/latest/FrontCover.html)
+
+例程:[TensorFlow.NET Examples](https://github.com/SciSharp/TensorFlow.NET-Examples)
+
+运行例程常见问题:[Tensorflow.NET FAQ](tensorflowlib/README.md)
+
+## 安装与使用
+
+安装可以在NuGet包管理器中搜索包名安装,也可以用下面命令行的方式。
+
+安装分为两个部分,第一部分是Tensorflow.NET的主体:
+
+```sh
+### 安装Tensorflow.NET
+PM> Install-Package TensorFlow.NET
+
+### 安装Tensorflow.Keras
+PM> Install-Package TensorFlow.Keras
+```
+
+第二部分是计算支持部分,只需要根据自己的设备和系统选择下面之一即可:
+
+```
+### CPU版本,支持Windows、Linux和Mac
+PM> Install-Package SciSharp.TensorFlow.Redist
+
+### Windows下的GPU版本(需要安装CUDA和cuDNN)
+PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU
+
+### Linux下的GPU版本(需要安装CUDA和cuDNN)
+PM> Install-Package SciSharp.TensorFlow.Redist-Linux-GPU
+```
+
+下面给出两个简单的例子,更多例子可以在[TensorFlow.NET Examples]中查看。
+
+### 简单例子(使用Eager Mode进行线性回归)
+
+```csharp
+using static Tensorflow.Binding;
+using static Tensorflow.KerasApi;
+using Tensorflow;
+using Tensorflow.NumPy;
+
+// Parameters
+var training_steps = 1000;
+var learning_rate = 0.01f;
+var display_step = 100;
+
+// Sample data
+var X = np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
+ 7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f);
+var Y = np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
+ 2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f);
+var n_samples = X.shape[0];
+
+// We can set a fixed init value in order to demo
+var W = tf.Variable(-0.06f, name: "weight");
+var b = tf.Variable(-0.73f, name: "bias");
+var optimizer = keras.optimizers.SGD(learning_rate);
+
+// Run training for the given number of steps.
+foreach (var step in range(1, training_steps + 1))
+{
+ // Run the optimization to update W and b values.
+ // Wrap computation inside a GradientTape for automatic differentiation.
+ using var g = tf.GradientTape();
+ // Linear regression (Wx + b).
+ var pred = W * X + b;
+ // Mean square error.
+ var loss = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples);
+ // should stop recording
+ // Compute gradients.
+ var gradients = g.gradient(loss, (W, b));
+
+ // Update W and b following gradients.
+ optimizer.apply_gradients(zip(gradients, (W, b)));
+
+ if (step % display_step == 0)
+ {
+ pred = W * X + b;
+ loss = tf.reduce_sum(tf.pow(pred - Y, 2)) / (2 * n_samples);
+ print($"step: {step}, loss: {loss.numpy()}, W: {W.numpy()}, b: {b.numpy()}");
+ }
+}
+```
+
+这一用例也可以在[Jupyter Notebook Example](https://github.com/SciSharp/SciSharpCube)进行运行.
+
+### 简单例子(使用Keras搭建Resnet)
+
+```csharp
+using static Tensorflow.Binding;
+using static Tensorflow.KerasApi;
+using Tensorflow;
+using Tensorflow.NumPy;
+
+var layers = keras.layers;
+// input layer
+var inputs = keras.Input(shape: (32, 32, 3), name: "img");
+// convolutional layer
+var x = layers.Conv2D(32, 3, activation: "relu").Apply(inputs);
+x = layers.Conv2D(64, 3, activation: "relu").Apply(x);
+var block_1_output = layers.MaxPooling2D(3).Apply(x);
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(block_1_output);
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(x);
+var block_2_output = layers.Add().Apply(new Tensors(x, block_1_output));
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(block_2_output);
+x = layers.Conv2D(64, 3, activation: "relu", padding: "same").Apply(x);
+var block_3_output = layers.Add().Apply(new Tensors(x, block_2_output));
+x = layers.Conv2D(64, 3, activation: "relu").Apply(block_3_output);
+x = layers.GlobalAveragePooling2D().Apply(x);
+x = layers.Dense(256, activation: "relu").Apply(x);
+x = layers.Dropout(0.5f).Apply(x);
+// output layer
+var outputs = layers.Dense(10).Apply(x);
+// build keras model
+var model = keras.Model(inputs, outputs, name: "toy_resnet");
+model.summary();
+// compile keras model in tensorflow static graph
+model.compile(optimizer: keras.optimizers.RMSprop(1e-3f),
+ loss: keras.losses.SparseCategoricalCrossentropy(from_logits: true),
+ metrics: new[] { "acc" });
+// prepare dataset
+var ((x_train, y_train), (x_test, y_test)) = keras.datasets.cifar10.load_data();
+// normalize the input
+x_train = x_train / 255.0f;
+// training
+model.fit(x_train[new Slice(0, 2000)], y_train[new Slice(0, 2000)],
+ batch_size: 64,
+ epochs: 10,
+ validation_split: 0.2f);
+// save the model
+model.save("./toy_resnet_model");
+```
+
+此外,Tensorflow.NET也支持用F#搭建上述模型进行训练和推理。
+
+## Tensorflow.NET版本对应关系
+
+| TensorFlow.NET Versions | tensorflow 1.14, cuda 10.0 | tensorflow 1.15, cuda 10.0 | tensorflow 2.3, cuda 10.1 | tensorflow 2.4, cuda 11 | tensorflow 2.7, cuda 11 |tensorflow 2.10, cuda 11 |
+| -------------------------- | ------------- | -------------- | ------------- | ------------- | ------------ | ------------ |
+| tf.net 0.10x, tf.keras 0.10 | | | | | | x |
+| tf.net 0.7x, tf.keras 0.7 | | | | | x | |
+| tf.net 0.4x, tf.keras 0.5 | | | | x | | |
+| tf.net 0.3x, tf.keras 0.4 | | | x | | | |
+| tf.net 0.2x | | x | x | | | |
+| tf.net 0.15 | x | x | | | | |
+| tf.net 0.14 | x | | | | | |
+
+
+```
+tf.net 0.4x -> tf native 2.4
+tf.net 0.6x -> tf native 2.6
+tf.net 0.7x -> tf native 2.7
+tf.net 0.10x -> tf native 2.10
+...
+```
+
+如果使用过程中发现有缺失的版本,请告知我们,谢谢!
+
+请注意Tensorflow.NET与Tensorflow.Keras版本存在一一对应关系,请安装与Tensorflow.NET对应的Tensorflow.Keras版本。
+
+## 参与我们的开发:
+
+我们欢迎任何人的任何形式的贡献!无论是文档中的错误纠正,新特性提议,还是BUG修复等等,都会使得Tensorflow.NET项目越来越好,Tensorflow.NET的全体开发者也会积极帮助解决您提出的问题。
+
+下面任何一种形式都可以帮助Tensorflow.NET越来越好:
+
+* Star和分享Tensorflow.NET项目
+* 为Tensorflow.NET添加更多的用例
+* 在issue中告知我们Tensorflow.NET目前相比tensorflow缺少的API或者没有对齐的特性
+* 在issue中提出Tensorflow.NET存在的BUG或者可以改进的地方
+* 在待办事项清单中选择一个进行或者解决某个issue
+* 帮助我们完善文档,这也十分重要
+
+
+## 支持我们
+我们推出了[TensorFlow.NET实战](https://item.jd.com/13441549.html)这本书,包含了Tensorflow.NET主要开发者编写的讲解与实战例程,欢迎您的购买,希望这本书可以给您带来帮助。
+
+
+
+
+
+
+## 联系我们
+
+可以在 [Twitter](https://twitter.com/ScisharpStack), [Facebook](https://www.facebook.com/scisharp.stack.9), [Medium](https://medium.com/scisharp), [LinkedIn](https://www.linkedin.com/company/scisharp-stack/)中关注我们,也可以在[Gitter](https://gitter.im/sci-sharp/community)中与项目开发者以及其它使用者进行沟通交流,也欢迎在仓库中提起issue。
+
+TensorFlow.NET is a part of [SciSharp STACK](https://scisharp.github.io/SciSharp/)
+
+
diff --git a/docs/README.md b/docs/README.md
index e69de29bb..0e3c00484 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -0,0 +1,21 @@
+### Install Sphinx
+```cmd
+pip install sphinx
+pip install recommonmark
+pip install sphinx_rtd_theme
+```
+
+### Init the docs
+```cmd
+sphinx-quickstart
+```
+
+### Build the docs
+```cmd
+make html
+```
+
+
+
+Access the compiled docs: [https://tensorflownet.readthedocs.io](https://tensorflownet.readthedocs.io/)
+
diff --git a/docs/RELEASE.md b/docs/RELEASE.md
new file mode 100644
index 000000000..62a1be238
--- /dev/null
+++ b/docs/RELEASE.md
@@ -0,0 +1,44 @@
+# Release Notes
+
+**Thanks to our Contributors!**
+
+This release contains contributions from many people at SciSharp as well as the external contributors.
+
+**Release Date 02/06/2021**
+
+### TensorFlow.Binding v0.33.0
+
+* Improve memory usage
+* Fix minor bugs
+
+### TensorFlow.Keras v0.4.0
+
+* Add Subtract layer
+
+* Add model.load_weights and model.save_weights
+
+* Fix memory leak issue
+
+* Support to build YOLOv3 object detection model
+
+
+
+**Release Date 01/09/2021**
+
+### TensorFlow.Binding v0.32.0
+
+* Fix input `dtype` for `MapDataset`.
+* Fix `image_dataset_from_directory` function.
+* Fix `tf.transpose`.
+* Add `array_ops.where_v2`, `array_ops.select_v2`, `array_ops.softplus`.
+* Add `dataset.dataset_cardinality`.
+
+### TensorFlow.Keras v0.3.0
+
+* Fix `weight` init value for `double` type in `compute_weighted_loss`.
+* Add `MeanSquaredError `, `MeanAbsolutePercentageError `, `MeanAbsoluteError` and `MeanSquaredLogarithmicError` loss functions.
+* `Sequential` model API works.
+* Add `ShellProgressBar` to show training progress better.
+
+
+
diff --git a/docs/The-Definitive-Guide/CH_1 Tensor.md b/docs/The-Definitive-Guide/CH_1 Tensor.md
deleted file mode 100644
index 1d09ba42d..000000000
--- a/docs/The-Definitive-Guide/CH_1 Tensor.md
+++ /dev/null
@@ -1,41 +0,0 @@
-# 第一章: Tensor
-
-### Represents one of the outputs of an Operation
-
-### 表示一个操作的输出
-
-
-
-##### What is Tensor?
-
-##### Tensor 是什么?
-
-Tensor holds a multi-dimensional array of elements of a single data type which is very similar with numpy's ndarray.
-
-Tensor是一个具有单一数据类型的多维数组容器,非常类似于numpy里的ndarray。如果你对numpy非常熟悉的话,那么对Tensor的理解会相当容易。
-
-
-
-##### How to create a Tensor?
-
-##### 如何创建一个Tensor?
-
-
-
-
-
-TF uses column major order.
-
-TF 采用的是按列存储模式,如果我们用NumSharp产生一个2 X 3的矩阵,如果按顺序从0到5访问数据的话,是不会得到1-6的数字的,而是得到1,4, 2, 5, 3, 6这个顺序的一组数字。
-
-```cs
-// generate a matrix:[[1, 2, 3], [4, 5, 6]]
-var nd = np.array(1f, 2f, 3f, 4f, 5f, 6f).reshape(2, 3);
-// the index will be 0 2 4 1 3 5, it's column-major order.
-```
-
-
-
-
-
-
diff --git a/docs/The-Definitive-Guide/CH_3 Operation.md b/docs/The-Definitive-Guide/CH_3 Operation.md
deleted file mode 100644
index e69de29bb..000000000
diff --git a/docs/The-Definitive-Guide/CH_4 Variable.md b/docs/The-Definitive-Guide/CH_4 Variable.md
deleted file mode 100644
index e69de29bb..000000000
diff --git a/docs/The-Definitive-Guide/CH_5 Session.md b/docs/The-Definitive-Guide/CH_5 Session.md
deleted file mode 100644
index e69de29bb..000000000
diff --git a/docs/The-Definitive-Guide/CH_6 Graph.md b/docs/The-Definitive-Guide/CH_6 Graph.md
deleted file mode 100644
index e69de29bb..000000000
diff --git a/docs/The-Definitive-Guide/Foreword.md b/docs/The-Definitive-Guide/Foreword.md
deleted file mode 100644
index 0a5232f9f..000000000
--- a/docs/The-Definitive-Guide/Foreword.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# Foreword 前言
-
-One of the most nerve-wracking periods when releasing the first version of an open source project occurs when the gitter community is created. You are all alone, eagerly hoping and wishing for the first user to come along. I still vividly remember those days.
-
-
-
-当我开始写这个项目的时候,我同时也在整理编码过程时候的想法,Tensorflow是个巨大最复杂的工程,很容易超出个人能力范围,所以想尽可能地把当时的思路记录下来,也想趁着记录整理的过程把思路理清。
-
-When I started writing this project, I was also sorting out the idea of the coding process. Tensorflow is a huge and complicated project, and it is easy to go beyond the scope of personal ability. Therefore, I want to record the thoughts at the time as much as possible. The process of recording and sorting clears the way of thinking.
\ No newline at end of file
diff --git a/docs/The-Definitive-Guide/Preface.md b/docs/The-Definitive-Guide/Preface.md
deleted file mode 100644
index ad91bf5ca..000000000
--- a/docs/The-Definitive-Guide/Preface.md
+++ /dev/null
@@ -1,35 +0,0 @@
-
-
-# Preface 序
-
-
-
-
-
-
-
-
-
-
-
-
-
-Why do I start the Tensorflow.NET project?
-
-我为什么会写Tensorflow.NET?
-
-再过几天就是2018年圣诞节,看着孩子一天天长大并懂事,感慨时间过得太快。IT技术更新换代比以往任何时候都更快,各种前后端技术纷纷涌现。大数据,人工智能和区块链,容器技术和微服务,分布式计算和无服务器技术,让人眼花缭乱。Amazon AI服务接口宣称不需要具有任何机器学习经验的工程师就能使用,让像我这样刚静下心来学习了两年并打算将来转行做AI架构的想法泼了一桶凉水。
-
-TensorFlow is an open source project for machine learning especially for deep learning. It's used for both research and production at Google company. It's designed according to dataflow programming pattern across a range of tasks.
-
-
-
-为了避免混淆,本书中对TensorFlow中定义的特有类不进行翻译,比如Tensor, Graph, Shape这些词都会保留英文名称。
-
-
-
-术语简称:
-
-TF: Google TensorFlow
-
-TF.NET: Tensorflow.NET
\ No newline at end of file
diff --git a/docs/The-Definitive-Guide/README.md b/docs/The-Definitive-Guide/README.md
deleted file mode 100644
index 926aded7b..000000000
--- a/docs/The-Definitive-Guide/README.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# The Definitive Guide to Tensorflow.NET
-# Tensorflow.NET 权威指南
-
-
-
-
-### The CSharp binding for Google's TensorFlow - An Open Source Machine Learning Framework for Everyone
-### 谷歌TensorFlow的C#封装库,开源机器学习框架。
-
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/docs/The-Definitive-Guide/Table of Contents.md b/docs/The-Definitive-Guide/Table of Contents.md
deleted file mode 100644
index 6551e9262..000000000
--- a/docs/The-Definitive-Guide/Table of Contents.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Table of Contents
-
-### Foreword...........................................................................................xxi
-
-### Preface..............................................................................................xxiii
-
-## Part I. Getting Started
-
-##### 1. You Know, for Machine Learning............................................................................................ 3
-
- Installing Tensorflow.NET
- Running Tensorflow.NET
- Talking to Tensorflow.NET
-
-## Part II. Tensorflow.NET in Depth
-
-## Part III. Dealing with Human Language
-
diff --git a/docs/_config.yml b/docs/_config.yml
new file mode 100644
index 000000000..c4192631f
--- /dev/null
+++ b/docs/_config.yml
@@ -0,0 +1 @@
+theme: jekyll-theme-cayman
\ No newline at end of file
diff --git a/docs/assets/Cover.psd b/docs/assets/Cover.psd
new file mode 100644
index 000000000..665487608
Binary files /dev/null and b/docs/assets/Cover.psd differ
diff --git a/docs/assets/Logo.md b/docs/assets/Logo.md
new file mode 100644
index 000000000..21e7858ae
--- /dev/null
+++ b/docs/assets/Logo.md
@@ -0,0 +1,3 @@
+TensorFlow.NET logo (c) 2019 by Meinrad Recheis.
+
+The logo is based on the original Tensorflow logo which is copyrighted by the respective creator.
\ No newline at end of file
diff --git a/docs/assets/TensorBoard-nn.png b/docs/assets/TensorBoard-nn.png
new file mode 100644
index 000000000..23ccc3db5
Binary files /dev/null and b/docs/assets/TensorBoard-nn.png differ
diff --git a/docs/assets/WeChatCollection.jpg b/docs/assets/WeChatCollection.jpg
new file mode 100644
index 000000000..587b54991
Binary files /dev/null and b/docs/assets/WeChatCollection.jpg differ
diff --git a/docs/assets/cnn-result.png b/docs/assets/cnn-result.png
new file mode 100644
index 000000000..e1cea1e48
Binary files /dev/null and b/docs/assets/cnn-result.png differ
diff --git a/docs/assets/cnn.png b/docs/assets/cnn.png
new file mode 100644
index 000000000..78c7a6808
Binary files /dev/null and b/docs/assets/cnn.png differ
diff --git a/docs/assets/eager-mode-add.png b/docs/assets/eager-mode-add.png
new file mode 100644
index 000000000..e3700fa62
Binary files /dev/null and b/docs/assets/eager-mode-add.png differ
diff --git a/docs/assets/graph_vis_animation.gif b/docs/assets/graph_vis_animation.gif
new file mode 100644
index 000000000..556383270
Binary files /dev/null and b/docs/assets/graph_vis_animation.gif differ
diff --git a/docs/assets/mnist.png b/docs/assets/mnist.png
new file mode 100644
index 000000000..824818721
Binary files /dev/null and b/docs/assets/mnist.png differ
diff --git a/docs/assets/nn-result.png b/docs/assets/nn-result.png
new file mode 100644
index 000000000..7957b8214
Binary files /dev/null and b/docs/assets/nn-result.png differ
diff --git a/docs/assets/nn.png b/docs/assets/nn.png
new file mode 100644
index 000000000..8fbb6f9b8
Binary files /dev/null and b/docs/assets/nn.png differ
diff --git a/docs/assets/performance-comparison.jpg b/docs/assets/performance-comparison.jpg
new file mode 100644
index 000000000..382f7ab61
Binary files /dev/null and b/docs/assets/performance-comparison.jpg differ
diff --git a/docs/assets/syntax-comparision.png b/docs/assets/syntax-comparision.png
new file mode 100644
index 000000000..d42b5cb9c
Binary files /dev/null and b/docs/assets/syntax-comparision.png differ
diff --git a/docs/assets/tf.net.architecture.svg b/docs/assets/tf.net.architecture.svg
new file mode 100644
index 000000000..933fd8027
--- /dev/null
+++ b/docs/assets/tf.net.architecture.svg
@@ -0,0 +1,370 @@
+
+
+
+
diff --git a/docs/assets/tf.net.icon-purple.svg b/docs/assets/tf.net.icon-purple.svg
new file mode 100644
index 000000000..7498987b8
--- /dev/null
+++ b/docs/assets/tf.net.icon-purple.svg
@@ -0,0 +1,141 @@
+
+
+
+
diff --git a/docs/assets/tf.net.icon-purple128.png b/docs/assets/tf.net.icon-purple128.png
new file mode 100644
index 000000000..d79ee7962
Binary files /dev/null and b/docs/assets/tf.net.icon-purple128.png differ
diff --git a/docs/assets/tf.net.icon-purple512.png b/docs/assets/tf.net.icon-purple512.png
new file mode 100644
index 000000000..0aa94f168
Binary files /dev/null and b/docs/assets/tf.net.icon-purple512.png differ
diff --git a/docs/assets/tf.net.icon-transparent.svg b/docs/assets/tf.net.icon-transparent.svg
new file mode 100644
index 000000000..e361115b3
--- /dev/null
+++ b/docs/assets/tf.net.icon-transparent.svg
@@ -0,0 +1,141 @@
+
+
+
+
diff --git a/docs/assets/tf.net.icon-transparent128.png b/docs/assets/tf.net.icon-transparent128.png
new file mode 100644
index 000000000..7831c9eb3
Binary files /dev/null and b/docs/assets/tf.net.icon-transparent128.png differ
diff --git a/docs/assets/tf.net.icon-transparent512.png b/docs/assets/tf.net.icon-transparent512.png
new file mode 100644
index 000000000..57227d9a9
Binary files /dev/null and b/docs/assets/tf.net.icon-transparent512.png differ
diff --git a/docs/assets/tf.net.logo.png b/docs/assets/tf.net.logo.png
new file mode 100644
index 000000000..ceebc184d
Binary files /dev/null and b/docs/assets/tf.net.logo.png differ
diff --git a/docs/assets/tf.net.logo.svg b/docs/assets/tf.net.logo.svg
new file mode 100644
index 000000000..b6e048ad8
--- /dev/null
+++ b/docs/assets/tf.net.logo.svg
@@ -0,0 +1,210 @@
+
+
+
+
diff --git a/docs/assets/tf.net.logo512.png b/docs/assets/tf.net.logo512.png
new file mode 100644
index 000000000..2e1b4eff9
Binary files /dev/null and b/docs/assets/tf.net.logo512.png differ
diff --git a/docs/assets/tf2.jpg b/docs/assets/tf2.jpg
new file mode 100644
index 000000000..c4ebd31ec
Binary files /dev/null and b/docs/assets/tf2.jpg differ
diff --git a/docs/assets/tf2.psd b/docs/assets/tf2.psd
new file mode 100644
index 000000000..1cde30235
Binary files /dev/null and b/docs/assets/tf2.psd differ
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 000000000..4d9eb83d9
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.http://sphinx-doc.org/
+ exit /b 1
+)
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+
+:end
+popd
diff --git a/docs/source/Constant.md b/docs/source/Constant.md
new file mode 100644
index 000000000..dd6aa3bf0
--- /dev/null
+++ b/docs/source/Constant.md
@@ -0,0 +1,85 @@
+# Chapter 2. Constant
+
+In TensorFlow, a constant is a special Tensor that cannot be modified while the graph is running. Like in a linear model `y = ax + b`, constant `b` can be represented as a `Constant` Tensor. Since the constant is a Tensor, it also has all the data characteristics of Tensor, including:
+
+* value: scalar value or constant list matching the data type defined in TensorFlow;
+* dtype: data type;
+* shape: dimensions;
+* name: constant's name;
+
+
+
+### How to create a Constant
+
+TensorFlow provides a handy function to create a Constant. In TF.NET, you can use the same function name `tf.constant` to create it. TF.NET adopts the same API naming as the Python binding. Although this may make developers who are used to the C# naming convention feel uncomfortable, after careful consideration we decided to give up the C# naming convention. One reason is that model developers then don't have to learn a totally different set of APIs.
+
+Initialize a scalar constant:
+
+```csharp
+var c1 = tf.constant(3); // int
+var c2 = tf.constant(1.0f); // float
+var c3 = tf.constant(2.0); // double
+var c4 = tf.constant("Big Tree"); // string
+```
+
+Initialize a constant through ndarray:
+
+TF.NET works very well with `NumSharp`'s `NDArray`. You can create a tensor from .NET primitive data type and NDArray as well. An `ndarray` is a (usually fixed-size) multidimensional container of items of the same type and size. The number of dimensions and items in an array is defined by its `shape`, which is a tuple of N non-negative integers that specify the sizes of each dimension.
+
+```csharp
+// dtype=int, shape=(2, 3)
+var nd = np.array(new int[,]
+{
+ {1, 2, 3},
+ {4, 5, 6}
+});
+var tensor = tf.constant(nd);
+```
+
+### Dive in Constant
+
+Now let's explore how `constant` works in `eager` mode inside the black box.
+
+Let's continue using the last examples, we're going to initialize a tensor in an ndarray of `[shape(2, 3), int32]`.
+
+##### NDArray
+
+The first thing we need to know is about `ndarray`'s memory model. The ndarray memory model is a very important data structure, and almost all underlying computations are inseparable from this data structure. One fundamental aspect of the ndarray is that an array is seen as a "chunk" of memory starting at some location. The interpretation of this memory depends on the stride information. A segment of memory is inherently 1-dimensional, and there are many different schemes for arranging the items of an N-dimensional array in a 1-dimensional block. `ndarray` objects can accommodate any strided indexing scheme. In a strided scheme, the N-dimensional index corresponds to the offset (in bytes) : .
+
+
+
+If we take a look at the real memory allocation in Visual Studio, below diagram helps us understand the data structure more intuitively. The strides keep track the size of every single dimension, help identify the actual offset in heap memory. The formula to calculate offset is: `offset = i * strides[0] + j * strides[1]`.
+
+For example: if you want to seek the value at `[1, 1]`, you just need to calculate `1 * 3 + 1 * 1 = 4`; converted to a pointer this is `0x000002556B194260 + 4 = 0x000002556B194264`, which holds the value `05`.
+
+
+
+Through the above diagram, we know how the data is stored in memory, and then we will look at how the data is transferred to `TensorFlow`.
+
+##### Tensor
+
+If you don't yet have a good understanding of what a `Tensor` is, you can go back to the `Tensor` chapter, which provides a detailed explanation in case you skipped it. A Tensor is essentially an NDArray with more than 2 dimensions.
+
+TensorFlow will decide whether to copy the data or use the same pointer. Generally speaking, it's safer to copy the data for the subsequent processing, especially when interoperating between the .NET runtime and the C++ runtime: they each have their own garbage collection (GC) mechanism, and the application will crash if one of them accesses a block of destroyed memory. `TF_STRING` and `TF_RESOURCE` tensors have a different representation in `TF_Tensor` than they do in `tensorflow::Tensor`. Other types have the same representation, so copy only if it is safe to do so.
+
+
+
+Before tensorflow is creating the `TF_Tensor`, it checks the shape and data size. If the size doesn't match, it will return `nullptr` pointer.
+
+##### Get the data of Tensor
+
+For `eager` mode, it's pretty simple to view the actual value in a `tensor`.
+
+```csharp
+var data = tensor.numpy()
+```
+
+The `data` will be a `ndarray` variable.
+
+##### Other functions to create a Constant
+
+* tf.zeros
+* tf.zeros_like
+* tf.ones
+* tf.ones_like
+* tf.fill
\ No newline at end of file
diff --git a/docs/source/ConvolutionNeuralNetwork.md b/docs/source/ConvolutionNeuralNetwork.md
new file mode 100644
index 000000000..6b47c9d8d
--- /dev/null
+++ b/docs/source/ConvolutionNeuralNetwork.md
@@ -0,0 +1,350 @@
+# Chapter. Convolution Neural Network
+
+In this chapter, we'll implement a simple Convolutional Neural Network model. We'll implement this model to classify MNIST dataset.
+
+
+
+The structure of the neural network we're going to build is as follows. The hand-written digit images of the MNIST data have 10 classes (from 0 to 9). The network consists of 2 convolutional layers followed by 2 fully-connected layers at the end.
+
+
+
+Get started with the implementation:
+
+1. **Prepare data**
+
+   MNIST is a dataset of handwritten digits which contains 55,000 examples for training, 5,000 examples for validation and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28 x 28 pixels) with values from 0 to 1. Each image has been flattened and converted to a 1-D array of 784 features. It's also a common benchmark dataset for deep learning.
+
+ 
+
+   We define some variables to make it easier to modify them later.
+
+ ```csharp
+ using System;
+ using NumSharp;
+ using Tensorflow;
+ using TensorFlowNET.Examples.Utility;
+ using static Tensorflow.Python;
+ ```
+
+ ```csharp
+ const int img_h = 28;
+ const int img_w = 28;
+ int n_classes = 10; // Number of classes, one class per digit
+ int n_channels = 1;
+ ```
+
+ We'll write the function which automatically loads the MNIST data and returns it in our desired shape and format. There is an MNIST data helper to make life easier.
+
+ ```csharp
+ Datasets mnist;
+ public void PrepareData()
+ {
+ mnist = MnistDataSet.read_data_sets("mnist", one_hot: true);
+ }
+ ```
+
+ Other than a function for loading the images and corresponding labels, we still need three more functions:
+
+ **reformat:** reformats the data to the format acceptable for convolutional layer.
+
+ ```csharp
+ private (NDArray, NDArray) Reformat(NDArray x, NDArray y)
+ {
+ var (img_size, num_ch, num_class) = (np.sqrt(x.shape[1]), 1, len(np.unique(np.argmax(y, 1))));
+ var dataset = x.reshape(x.shape[0], img_size, img_size, num_ch).astype(np.float32);
+ //y[0] = np.arange(num_class) == y[0];
+ //var labels = (np.arange(num_class) == y.reshape(y.shape[0], 1, y.shape[1])).astype(np.float32);
+ return (dataset, y);
+ }
+ ```
+
+
+
+ **randomize**: which randomizes the order of images and their labels. At the beginning of each epoch, we will re-randomize the order of data samples to make sure that the trained model is not sensitive to the order of data.
+
+ ```csharp
+ private (NDArray, NDArray) randomize(NDArray x, NDArray y)
+ {
+ var perm = np.random.permutation(y.shape[0]);
+
+ np.random.shuffle(perm);
+ return (mnist.train.images[perm], mnist.train.labels[perm]);
+ }
+ ```
+
+ **get_next_batch**: which only selects a few number of images determined by the batch_size variable (as per Stochastic Gradient Descent method).
+
+ ```csharp
+ private (NDArray, NDArray) get_next_batch(NDArray x, NDArray y, int start, int end)
+ {
+ var x_batch = x[$"{start}:{end}"];
+ var y_batch = y[$"{start}:{end}"];
+ return (x_batch, y_batch);
+ }
+ ```
+
+2. **Set Hyperparameters**
+
+   There are about 55,000 images in the training set, and it takes a long time to calculate the gradient of the model using all these images. Therefore we use a small batch of images in each iteration of the optimizer, as per Stochastic Gradient Descent.
+
+ * epoch: one forward pass and one backward pass of all the training examples.
+ * batch size: the number of training examples in one forward/backward pass. The higher the batch size, the more memory space you'll need.
+   * iteration: one forward pass and one backward pass of one batch of the training examples.
+
+ ```csharp
+ int epochs = 10;
+ int batch_size = 100;
+ float learning_rate = 0.001f;
+ int display_freq = 200; // Frequency of displaying the training results
+ ```
+
+3. **Network configuration**
+
+ 1st convolutional layer:
+
+ ```csharp
+ int filter_size1 = 5; // Convolution filters are 5 x 5 pixels.
+ int num_filters1 = 16; // There are 16 of these filters.
+ int stride1 = 1; // The stride of the sliding window
+ ```
+
+ 2nd convolutional layer:
+
+ ```csharp
+ int filter_size2 = 5; // Convolution filters are 5 x 5 pixels.
+ int num_filters2 = 32;// There are 32 of these filters.
+ int stride2 = 1; // The stride of the sliding window
+ ```
+
+ Fully-connected layer:
+
+ ```csharp
+   int h1 = 128; // Number of neurons in fully-connected layer.
+ ```
+
+
+
+4. **Building the neural network**
+
+ Let's make some functions to help build computation graph.
+
+ **variables**: We need to define two variables `W` and `b` to construct our linear model. We use `Tensorflow Variables` of proper size and initialization to define them.
+
+ ```csharp
+ // Create a weight variable with appropriate initialization
+ private RefVariable weight_variable(string name, int[] shape)
+ {
+ var initer = tf.truncated_normal_initializer(stddev: 0.01f);
+ return tf.get_variable(name,
+ dtype: tf.float32,
+ shape: shape,
+ initializer: initer);
+ }
+
+ // Create a bias variable with appropriate initialization
+ private RefVariable bias_variable(string name, int[] shape)
+ {
+ var initial = tf.constant(0f, shape: shape, dtype: tf.float32);
+ return tf.get_variable(name,
+ dtype: tf.float32,
+ initializer: initial);
+ }
+ ```
+
+ **2D convolution layer**: This layer creates a convolution kernel that is convolved with the layer input to produce a tensor of outputs.
+
+ ```csharp
+ private Tensor conv_layer(Tensor x, int filter_size, int num_filters, int stride, string name)
+ {
+ return with(tf.variable_scope(name), delegate {
+
+ var num_in_channel = x.shape[x.NDims - 1];
+ var shape = new[] { filter_size, filter_size, num_in_channel, num_filters };
+ var W = weight_variable("W", shape);
+ // var tf.summary.histogram("weight", W);
+ var b = bias_variable("b", new[] { num_filters });
+ // tf.summary.histogram("bias", b);
+ var layer = tf.nn.conv2d(x, W,
+ strides: new[] { 1, stride, stride, 1 },
+ padding: "SAME");
+ layer += b;
+ return tf.nn.relu(layer);
+ });
+ }
+ ```
+
+   **max-pooling layer**: Max pooling operation for spatial data.
+
+ ```csharp
+ private Tensor max_pool(Tensor x, int ksize, int stride, string name)
+ {
+ return tf.nn.max_pool(x,
+ ksize: new[] { 1, ksize, ksize, 1 },
+ strides: new[] { 1, stride, stride, 1 },
+ padding: "SAME",
+ name: name);
+ }
+ ```
+
+ **flatten_layer**: Flattens the output of the convolutional layer to be fed into fully-connected layer.
+
+ ```csharp
+ private Tensor flatten_layer(Tensor layer)
+ {
+ return with(tf.variable_scope("Flatten_layer"), delegate
+ {
+ var layer_shape = layer.TensorShape;
+ var num_features = layer_shape[new Slice(1, 4)].Size;
+ var layer_flat = tf.reshape(layer, new[] { -1, num_features });
+
+ return layer_flat;
+ });
+ }
+ ```
+
+
+
+ **fully-connected layer**: Neural network consists of stacks of fully-connected (dense) layers. Having the weight (W) and bias (b) variables, a fully-connected layer is defined as `activation(W x X + b)`. The complete `fc_layer` function is as below:
+
+ ```csharp
+ private Tensor fc_layer(Tensor x, int num_units, string name, bool use_relu = true)
+ {
+ return with(tf.variable_scope(name), delegate
+ {
+ var in_dim = x.shape[1];
+
+ var W = weight_variable("W_" + name, shape: new[] { in_dim, num_units });
+ var b = bias_variable("b_" + name, new[] { num_units });
+
+ var layer = tf.matmul(x, W) + b;
+ if (use_relu)
+ layer = tf.nn.relu(layer);
+
+ return layer;
+ });
+ }
+ ```
+
+   **inputs**: Now we need to define the proper tensors to feed the input into our model. Placeholder variables are the suitable choice for the input images and corresponding labels. This allows us to change the inputs (images and labels) to the TensorFlow graph.
+
+ ```csharp
+ with(tf.name_scope("Input"), delegate
+ {
+ // Placeholders for inputs (x) and outputs(y)
+ x = tf.placeholder(tf.float32, shape: (-1, img_h, img_w, n_channels), name: "X");
+ y = tf.placeholder(tf.float32, shape: (-1, n_classes), name: "Y");
+ });
+ ```
+
+ Placeholder `y` is the variable for the true labels associated with the images that were input in the placeholder variable `x`. It holds an arbitrary number of labels and each label is a vector of length `num_classes` which is 10.
+
+ **network layers**: After creating the proper input, we have to pass it to our model. Since we have a neural network, we can stack multiple fully-connected layers using `fc_layer` method. Note that we will not use any activation function (use_relu = false) in the last layer. The reason is that we can use `tf.nn.softmax_cross_entropy_with_logits` to calculate the loss.
+
+ ```csharp
+ var conv1 = conv_layer(x, filter_size1, num_filters1, stride1, name: "conv1");
+ var pool1 = max_pool(conv1, ksize: 2, stride: 2, name: "pool1");
+ var conv2 = conv_layer(pool1, filter_size2, num_filters2, stride2, name: "conv2");
+ var pool2 = max_pool(conv2, ksize: 2, stride: 2, name: "pool2");
+ var layer_flat = flatten_layer(pool2);
+ var fc1 = fc_layer(layer_flat, h1, "FC1", use_relu: true);
+ var output_logits = fc_layer(fc1, n_classes, "OUT", use_relu: false);
+ ```
+
+ **loss function, optimizer, accuracy, prediction**: After creating the network, we have to calculate the loss and optimize it, we have to calculate the `prediction` and `accuracy`.
+
+ ```csharp
+   with(tf.variable_scope("Train"), delegate
+   {
+       with(tf.variable_scope("Loss"), delegate
+       {
+           loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: output_logits), name: "loss");
+       });
+ with(tf.variable_scope("Optimizer"), delegate
+ {
+ optimizer = tf.train.AdamOptimizer(learning_rate: learning_rate, name: "Adam-op").minimize(loss);
+ });
+
+ with(tf.variable_scope("Accuracy"), delegate
+ {
+ var correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name: "correct_pred");
+ accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name: "accuracy");
+ });
+
+ with(tf.variable_scope("Prediction"), delegate
+ {
+ cls_prediction = tf.argmax(output_logits, axis: 1, name: "predictions");
+ });
+ });
+ ```
+
+ **initialize variables**: We have to invoke a variable initializer operation to initialize all variables.
+
+```csharp
+ var init = tf.global_variables_initializer();
+```
+
+5. **Train**
+
+ After creating the graph, we can train our model. To train the model, we have to create a session and run the graph in the session.
+
+ ```csharp
+ // Number of training iterations in each epoch
+ var num_tr_iter = y_train.len / batch_size;
+
+ var init = tf.global_variables_initializer();
+ sess.run(init);
+
+ float loss_val = 100.0f;
+ float accuracy_val = 0f;
+
+ foreach (var epoch in range(epochs))
+ {
+ print($"Training epoch: {epoch + 1}");
+ // Randomly shuffle the training data at the beginning of each epoch
+ (x_train, y_train) = mnist.Randomize(x_train, y_train);
+
+ foreach (var iteration in range(num_tr_iter))
+ {
+ var start = iteration * batch_size;
+ var end = (iteration + 1) * batch_size;
+ var (x_batch, y_batch) = mnist.GetNextBatch(x_train, y_train, start, end);
+
+ // Run optimization op (backprop)
+ sess.run(optimizer, new FeedItem(x, x_batch), new FeedItem(y, y_batch));
+
+ if (iteration % display_freq == 0)
+ {
+ // Calculate and display the batch loss and accuracy
+ var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_batch), new FeedItem(y, y_batch));
+ loss_val = result[0];
+ accuracy_val = result[1];
+ print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")}");
+ }
+ }
+
+ // Run validation after every epoch
+ var results1 = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_valid), new FeedItem(y, y_valid));
+ loss_val = results1[0];
+ accuracy_val = results1[1];
+ print("---------------------------------------------------------");
+ print($"Epoch: {epoch + 1}, validation loss: {loss_val.ToString("0.0000")}, validation accuracy: {accuracy_val.ToString("P")}");
+ print("---------------------------------------------------------");
+ }
+ ```
+
+6. **Test**
+
+ After the training is done, we have to test our model to see how good it performs on a new dataset.
+
+ ```csharp
+ public void Test(Session sess)
+ {
+ var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_test), new FeedItem(y, y_test));
+ loss_test = result[0];
+ accuracy_test = result[1];
+ print("---------------------------------------------------------");
+ print($"Test loss: {loss_test.ToString("0.0000")}, test accuracy: {accuracy_test.ToString("P")}");
+ print("---------------------------------------------------------");
+ }
+ ```
+
+
+
diff --git a/docs/source/EagerMode.md b/docs/source/EagerMode.md
new file mode 100644
index 000000000..ded56d41f
--- /dev/null
+++ b/docs/source/EagerMode.md
@@ -0,0 +1,3 @@
+# Chapter 4. Eager Mode
+
+TensorFlow's eager execution is an imperative programming environment that evaluates operations immediately, without building graphs: operations return concrete values instead of constructing a computational graph to run later. This makes it easy to get started with TensorFlow and debug models, and it reduces boilerplate as well.
\ No newline at end of file
diff --git a/docs/source/Foreword.md b/docs/source/Foreword.md
new file mode 100644
index 000000000..256094f57
--- /dev/null
+++ b/docs/source/Foreword.md
@@ -0,0 +1,11 @@
+# Foreword
+
+One of the most nerve-wracking periods when releasing the first version of an open source project occurs when the [gitter](https://gitter.im/sci-sharp/community) community is created. You are all alone, eagerly hoping and wishing for the first user to come along. I still vividly remember those days.
+
+
+
+TensorFlow.NET is my third open source project. BotSharp and NumSharp are the first two. The response has been pretty good. I also got a lot of stars on github. Although the first two projects were very difficult, I have to admit that TensorFlow.NET is much more difficult than the previous two, and it is an area I have never been involved with, mainly related to GPU parallel computing, distributed computing and neural network models. When I started writing this project, I was also sorting out my ideas about the coding process. TensorFlow is a huge and complicated project, and it is easy to go beyond the scope of personal ability. Therefore, I want to record my thoughts at the time as much as possible. The process of recording and sorting clears the way of thinking.
+
+
+
+All the examples in this book can be found in the github repository of TensorFlow.NET. When the source code and the code in the book are inconsistent, please refer to the source code. The sample code is typically located in the Example or UnitTest project.
diff --git a/docs/source/FrontCover.md b/docs/source/FrontCover.md
new file mode 100644
index 000000000..322d431c9
--- /dev/null
+++ b/docs/source/FrontCover.md
@@ -0,0 +1,47 @@
+# The Definitive Guide to TensorFlow.NET
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+### The CSharp binding for Google's TensorFlow
+
+#### An Open Source Machine Learning Framework for Everyone
+
+
+
+
+
+
+
+
+
+
+
+
+Haiping Chen
+Christmas, 2018
+
+
+
+
+
+
+
+
+
diff --git a/docs/source/Gradient.md b/docs/source/Gradient.md
new file mode 100644
index 000000000..818ec73e7
--- /dev/null
+++ b/docs/source/Gradient.md
@@ -0,0 +1,15 @@
+# Chapter. Gradient
+
+### Register custom gradient function
+
+TF.NET is extensible: custom gradient functions can be registered for any operation.
+
+```csharp
+// define gradient function
+ops.RegisterGradientFunction("ConcatV2", (oper, out_grads) =>
+{
+    var grad = out_grads[0];
+ return new Tensor[]{ };
+});
+```
+
diff --git a/docs/source/Graph.md b/docs/source/Graph.md
new file mode 100644
index 000000000..874cd9a42
--- /dev/null
+++ b/docs/source/Graph.md
@@ -0,0 +1,81 @@
+# Chapter 3. Graph
+
+TensorFlow uses a **dataflow graph** to represent your computation in terms of the dependencies between individual operations. A graph defines the computation. It doesn't compute anything, it doesn't hold any values, it just defines the operations that you specified in your code.
+
+### Defining the Graph
+
+We define a graph with a variable and three operations: `variable` returns the current value of our variable. `initialize` assigns the initial value of 31 to that variable. `assign` assigns the new value of 12 to that variable.
+
+```csharp
+with(tf.Graph().as_default(), graph =>
+{
+ var variable = tf.Variable(31, name: "tree");
+ tf.global_variables_initializer();
+ variable.assign(12);
+});
+```
+
+TF.NET simulates a `with` syntax to manage the Graph lifecycle: the graph will be disposed when the instance is no longer needed. The graph is also what the sessions in the next chapter use when a graph is not manually specified, because you invoked `as_default()`.
+
+A typical graph looks like below:
+
+
+
+
+
+### Save Model
+
+Saving the model means saving all the values of the parameters and the graph.
+
+```python
+saver = tf.train.Saver()
+saver.save(sess,'./tensorflowModel.ckpt')
+```
+
+After saving the model there will be four files:
+
+* tensorflowModel.ckpt.meta:
+* tensorflowModel.ckpt.data-00000-of-00001:
+* tensorflowModel.ckpt.index
+* checkpoint
+
+We also created a protocol buffer file .pbtxt. It is human readable; if you want to write it as binary instead, pass `as_text: false`.
+
+* tensorflowModel.pbtxt:
+
+This holds a network of nodes, each representing one operation, connected to each other as inputs and outputs.
+
+
+
+### Freezing the Graph
+
+##### *Why we need it?*
+
+When we need to keep all the values of the variables and the Graph structure in a single file we have to freeze the graph.
+
+```python
+from tensorflow.python.tools import freeze_graph
+
+freeze_graph.freeze_graph(input_graph = 'logistic_regression/tensorflowModel.pbtxt',
+ input_saver = "",
+ input_binary = False,
+ input_checkpoint = 'logistic_regression/tensorflowModel.ckpt',
+ output_node_names = "Softmax",
+ restore_op_name = "save/restore_all",
+ filename_tensor_name = "save/Const:0",
+ output_graph = 'frozentensorflowModel.pb',
+ clear_devices = True,
+ initializer_nodes = "")
+
+```
+
+### Optimizing for Inference
+
+To reduce the amount of computation needed when the network is used only for inference, we can remove the parts of the graph that are only needed for training.
+
+
+
+### Restoring the Model
+
+
+
diff --git a/docs/source/HelloWorld.md b/docs/source/HelloWorld.md
new file mode 100644
index 000000000..8b7fbf733
--- /dev/null
+++ b/docs/source/HelloWorld.md
@@ -0,0 +1,77 @@
+# Get started with TensorFlow.NET
+
+I would describe TensorFlow as an open source machine learning framework developed by Google which can be used to build neural networks and perform a variety of machine learning tasks. It works on a data flow graph where the nodes are the mathematical operations and the edges are the data in the form of tensors, hence the name Tensor-Flow.
+
+
+
+Let's run a classic HelloWorld program first and see if TensorFlow is running on .NET. I can't think of a simpler HelloWorld than this.
+
+
+
+### Install the TensorFlow.NET SDK
+
+TensorFlow.NET uses the .NET Standard 2.0 standard, so your new project Target Framework can be .NET Framework or .NET Core/ .NET 5. All the examples in this book are using .NET Core 3.1 and Microsoft Visual Studio Community 2019. To start building a TensorFlow program you just need to download and install the .NET SDK (Software Development Kit). You have to download the latest .NET Core SDK from the official website: https://dotnet.microsoft.com/download.
+
+
+
+1. Create a new project
+
+ 
+
+2. Choose Console App (.NET Core)
+
+ 
+
+
+
+```cmd
+### install tensorflow C# binding
+PM> Install-Package TensorFlow.NET
+
+### Install tensorflow binary
+### For CPU version
+PM> Install-Package SciSharp.TensorFlow.Redist
+
+### For GPU version (CUDA and cuDNN are required)
+PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU
+```
+
+### Start coding Hello World
+
+After installing the TensorFlow.NET package, you can add `using static Tensorflow.Binding` to reference the TensorFlow .NET library.
+
+TensorFlow 2.x enabled `Eager Mode` by default. About what eager mode is, I will introduce it in detail in the following chapters.
+
+```csharp
+using System;
+using static Tensorflow.Binding;
+
+namespace TensorFlowNET.Examples
+{
+ ///
+ /// Simple hello world using TensorFlow
+ ///
+ class Program
+ {
+ static void Main(string[] args)
+ {
+ var hello = tf.constant("Hello, TensorFlow!");
+ Console.WriteLine(hello);
+ }
+ }
+}
+```
+After CTRL + F5 run, you will get the output.
+```cmd
+9/20/2020 2:15:09 AM Starting Hello World
+tf.Tensor: shape=(), dtype=string, numpy=Hello, TensorFlow.NET!
+9/20/2020 2:15:09 AM Completed Hello World
+Example: Hello World in 0.1273463s is OK!
+TensorFlow.NET v0.20.1.0
+TensorFlow Binary v2.3.0
+1 of 21 example(s) are completed.
+Press [Enter] to continue...
+```
+
+This sample code can be found at [here](https://github.com/SciSharp/SciSharp-Stack-Examples/blob/master/src/TensorFlowNET.Examples/HelloWorld.cs).
+
diff --git a/docs/source/ImageRecognition.md b/docs/source/ImageRecognition.md
new file mode 100644
index 000000000..74d3ee5bf
--- /dev/null
+++ b/docs/source/ImageRecognition.md
@@ -0,0 +1,137 @@
+# Chapter. Image Recognition
+
+An example of using [TensorFlow.NET](https://github.com/SciSharp/TensorFlow.NET) and [NumSharp](https://github.com/SciSharp/NumSharp) for image recognition: it will use a pre-trained inception model to predict an image, outputting the categories sorted by probability. The original paper is [here](https://arxiv.org/pdf/1512.00567.pdf). The Inception architecture of GoogLeNet was designed to perform well even under strict constraints on memory and computational budget. The computational cost of Inception is also much lower than other high-performing successors. This has made it feasible to utilize Inception networks in big-data scenarios, where huge amounts of data need to be processed at reasonable cost, or scenarios where memory or computational capacity is inherently limited, for example in mobile vision settings.
+
+The GoogLeNet architecture conforms to below design principles:
+
+* Avoid representational bottlenecks, especially early in the network.
+* Higher dimensional representations are easier to process locally within a network.
+* Spatial aggregation can be done over lower dimensional embeddings without much or any loss in representational power.
+* Balance the width and depth of the network.
+
+#### Let's get started with real code.
+
+##### 1. Prepare data
+
+This example will download the dataset and uncompress it automatically. Some external paths are omitted, please refer to the source code for the real path.
+
+```csharp
+private void PrepareData()
+{
+ Directory.CreateDirectory(dir);
+
+ // get model file
+ string url = "models/inception_v3_2016_08_28_frozen.pb.tar.gz";
+
+ string zipFile = Path.Join(dir, $"{pbFile}.tar.gz");
+ Utility.Web.Download(url, zipFile);
+
+ Utility.Compress.ExtractTGZ(zipFile, dir);
+
+ // download sample picture
+ string pic = "grace_hopper.jpg";
+ Utility.Web.Download($"data/{pic}", Path.Join(dir, pic));
+}
+```
+
+##### 2. Load image file and normalize
+
+We need to load a sample image to test our pre-trained inception model. Convert it into a tensor and normalize the input image. The pre-trained model takes input in the form of a 4-dimensional tensor with shape [BATCH_SIZE, INPUT_HEIGHT, INPUT_WEIGHT, 3] where:
+
+- BATCH_SIZE allows for inference of multiple images in one pass through the graph
+- INPUT_HEIGHT is the height of the images on which the model was trained
+- INPUT_WEIGHT is the width of the images on which the model was trained
+- 3 is the (R, G, B) values of the pixel colors represented as a float.
+
+```csharp
+private NDArray ReadTensorFromImageFile(string file_name,
+ int input_height = 299,
+ int input_width = 299,
+ int input_mean = 0,
+ int input_std = 255)
+{
+ return with(tf.Graph().as_default(), graph =>
+ {
+ var file_reader = tf.read_file(file_name, "file_reader");
+ var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader");
+ var caster = tf.cast(image_reader, tf.float32);
+ var dims_expander = tf.expand_dims(caster, 0);
+ var resize = tf.constant(new int[] { input_height, input_width });
+ var bilinear = tf.image.resize_bilinear(dims_expander, resize);
+ var sub = tf.subtract(bilinear, new float[] { input_mean });
+ var normalized = tf.divide(sub, new float[] { input_std });
+
+ return with(tf.Session(graph), sess => sess.run(normalized));
+ });
+}
+```
+
+##### 3. Load pre-trained model and predict
+
+Load the pre-trained inception model which is saved in Google's protobuf file format. Construct a new graph, then set the input and output operations in a new session. After running the session, you will get a numpy-like ndarray which is provided by NumSharp. With NumSharp, you can easily perform various operations on multi-dimensional arrays in the .NET environment.
+
+```csharp
+public void Run()
+{
+ PrepareData();
+
+ var labels = File.ReadAllLines(Path.Join(dir, labelFile));
+
+ var nd = ReadTensorFromImageFile(Path.Join(dir, picFile),
+ input_height: input_height,
+ input_width: input_width,
+ input_mean: input_mean,
+ input_std: input_std);
+
+ var graph = Graph.ImportFromPB(Path.Join(dir, pbFile));
+ var input_operation = graph.get_operation_by_name(input_name);
+ var output_operation = graph.get_operation_by_name(output_name);
+
+ var results = with(tf.Session(graph),
+ sess => sess.run(output_operation.outputs[0],
+ new FeedItem(input_operation.outputs[0], nd)));
+
+ results = np.squeeze(results);
+
+ var argsort = results.argsort();
+ var top_k = argsort.Data()
+ .Skip(results.size - 5)
+ .Reverse()
+ .ToArray();
+
+ foreach (float idx in top_k)
+ Console.WriteLine($"{picFile}: {idx} {labels[(int)idx]}, {results[(int)idx]}");
+}
+```
+
+##### 4. Print the result
+
+The best probability is `military uniform` which is 0.8343058. It's the correct classification.
+
+```powershell
+2/18/2019 3:56:18 AM Starting InceptionArchGoogLeNet
+label_image_data\inception_v3_2016_08_28_frozen.pb.tar.gz already exists.
+label_image_data\grace_hopper.jpg already exists.
+2019-02-19 21:56:18.684463: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
+create_op: Const 'file_reader/filename', inputs: empty, control_inputs: empty, outputs: file_reader/filename:0
+create_op: ReadFile 'file_reader', inputs: file_reader/filename:0, control_inputs: empty, outputs: file_reader:0
+create_op: DecodeJpeg 'jpeg_reader', inputs: file_reader:0, control_inputs: empty, outputs: jpeg_reader:0
+create_op: Cast 'Cast/Cast', inputs: jpeg_reader:0, control_inputs: empty, outputs: Cast/Cast:0
+create_op: Const 'ExpandDims/dim', inputs: empty, control_inputs: empty, outputs: ExpandDims/dim:0
+create_op: ExpandDims 'ExpandDims', inputs: Cast/Cast:0, ExpandDims/dim:0, control_inputs: empty, outputs: ExpandDims:0
+create_op: Const 'Const', inputs: empty, control_inputs: empty, outputs: Const:0
+create_op: ResizeBilinear 'ResizeBilinear', inputs: ExpandDims:0, Const:0, control_inputs: empty, outputs: ResizeBilinear:0
+create_op: Const 'y', inputs: empty, control_inputs: empty, outputs: y:0
+create_op: Sub 'Sub', inputs: ResizeBilinear:0, y:0, control_inputs: empty, outputs: Sub:0
+create_op: Const 'y_1', inputs: empty, control_inputs: empty, outputs: y_1:0
+create_op: RealDiv 'truediv', inputs: Sub:0, y_1:0, control_inputs: empty, outputs: truediv:0
+grace_hopper.jpg: 653 military uniform, 0.8343058
+grace_hopper.jpg: 668 mortarboard, 0.02186947
+grace_hopper.jpg: 401 academic gown, 0.01035806
+grace_hopper.jpg: 716 pickelhaube, 0.008008132
+grace_hopper.jpg: 466 bulletproof vest, 0.005350832
+2/18/2019 3:56:25 AM Completed InceptionArchGoogLeNet
+```
+
+You can find the full source code from [github](https://github.com/SciSharp/TensorFlow.NET-Examples/tree/master/src/TensorFlowNET.Examples/ImageProcessing).
+
diff --git a/docs/source/LinearRegression.md b/docs/source/LinearRegression.md
new file mode 100644
index 000000000..8033625c3
--- /dev/null
+++ b/docs/source/LinearRegression.md
@@ -0,0 +1,85 @@
+# Chapter. Linear Regression
+
+
+
+### What is linear regression?
+
+Linear regression is a linear approach to modelling the relationship between a scalar response (or dependent variable) and one or more explanatory variables (or independent variables).
+
+Consider the case of a single variable of interest y and a single predictor variable x. The predictor variables are called by many names: covariates, inputs, features; the predicted variable is often called response, output, outcome.
+
+We have some data $D=\{x{\tiny i},y{\tiny i}\}$ and we assume a simple linear model of this dataset with Gaussian noise:
+
+
+```csharp
+// Prepare training Data
+var train_X = np.array(3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f, 7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f);
+var train_Y = np.array(1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f, 2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f);
+var n_samples = train_X.shape[0];
+```
+
+
+Based on the given data points, we try to plot a line that models the points the best. The red line can be modelled based on the linear equation: $y = wx + b$. The motive of the linear regression algorithm is to find the best values for $w$ and $b$. Before moving on to the algorithm, let's have a look at two important concepts you must know to better understand linear regression.
+
+
+
+### Cost Function
+
+The cost function helps us to figure out the best possible values for $w$ and $b$ which would provide the best fit line for the data points. Since we want the best values for $w$ and $b$, we convert this search problem into a minimization problem where we would like to minimize the error between the predicted value and the actual value.
+
+
+
+
+
+We choose the above function to minimize. The difference between the predicted values and ground truth measures the error difference. We square the error difference and sum over all data points and divide that
+value by the total number of data points. This provides the average squared error over all the data points. Therefore, this cost function is also known as the Mean Squared Error(MSE) function. Now, using this MSE
+function we are going to change the values of $w$ and $b$ such that the MSE value settles at the minima.
+
+
+
+```csharp
+// tf Graph Input
+var X = tf.placeholder(tf.float32);
+var Y = tf.placeholder(tf.float32);
+
+// Set model weights
+var W = tf.Variable(rng.randn(), name: "weight");
+var b = tf.Variable(rng.randn(), name: "bias");
+
+// Construct a linear model
+var pred = tf.add(tf.multiply(X, W), b);
+
+// Mean squared error
+var cost = tf.reduce_sum(tf.pow(pred - Y, 2.0f)) / (2.0f * n_samples);
+```
+
+
+
+### Gradient Descent
+
+Another important concept we need to understand is gradient descent. Gradient descent is a method of updating $w$ and $b$ to minimize the cost function. The idea is that we start with some random values for $w$ and $b$ and then we change these values iteratively to reduce the cost. Gradient descent tells us how to update the values, or which direction we should go next. Gradient descent is also known as **steepest descent**.
+
+
+
+
+
+
+To draw an analogy, imagine a pit in the shape of U and you are standing at the topmost point in the pit and your objective is to reach the bottom of the pit. There is a catch, you can only take a discrete number
+of steps to reach the bottom. If you decide to take one step at a time you would eventually reach the bottom of the pit but this would take a longer time. If you choose to take longer steps each time, you would
+reach sooner but, there is a chance that you could overshoot the bottom of the pit and not exactly at the bottom. In the gradient descent algorithm, the number of steps you take is the learning rate. This
+decides on how fast the algorithm converges to the minima.
+
+
+
+
+```csharp
+// Gradient descent
+// Note, minimize() knows to modify W and b because Variable objects are trainable=True by default
+var optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost);
+```
+
+When we visualize the graph in TensorBoard:
+
+
+
+The full example is [here](https://github.com/SciSharp/TensorFlow.NET-Examples/blob/master/src/TensorFlowNET.Examples/BasicModels/LinearRegression.cs).
diff --git a/docs/source/LogisticRegression.md b/docs/source/LogisticRegression.md
new file mode 100644
index 000000000..ddf75f846
--- /dev/null
+++ b/docs/source/LogisticRegression.md
@@ -0,0 +1,16 @@
+# Chapter. Logistic Regression
+
+### What is logistic regression?
+
+Logistic regression is a statistical analysis method used to predict a data value based on prior observations of a data set. A logistic regression model predicts a dependent data variable by analyzing the relationship between one or more existing independent variables.
+
+
+
+The dependent variable of logistic regression can be two-category or multi-category, but the two-category case is more common and easier to explain. So the most common use of logistic regression in practice is binary classification. The example used by TensorFlow.NET is hand-written digit recognition, which is a multi-category problem.
+
+
+
+Softmax regression allows us to handle classification with K classes, where K is the number of classes.
+
+
+The full example is [here](https://github.com/SciSharp/TensorFlow.NET-Examples/blob/master/src/TensorFlowNET.Examples/BasicModels/LogisticRegression.cs).
diff --git a/docs/source/MnistInRnn.md b/docs/source/MnistInRnn.md
new file mode 100644
index 000000000..ce8a13909
--- /dev/null
+++ b/docs/source/MnistInRnn.md
@@ -0,0 +1,5 @@
+# Chapter. MNIST In RNN
+
+### Recurrent Neural Networks
+
+Recurrent Neural Networks (RNNs) are popular models that have shown great promise in sequential data classification tasks. A traditional neural network model cannot make the next prediction based on the knowledge that has been learned before.
\ No newline at end of file
diff --git a/docs/source/NearestNeighbor.md b/docs/source/NearestNeighbor.md
new file mode 100644
index 000000000..94e300df6
--- /dev/null
+++ b/docs/source/NearestNeighbor.md
@@ -0,0 +1,5 @@
+# Chapter. Nearest Neighbor
+
+The nearest neighbour algorithm was one of the first algorithms used to solve the travelling salesman problem. In it, the salesman starts at a random city and repeatedly visits the nearest city until all have been visited. It quickly yields a short tour, but usually not the optimal one.
+
+The full example is [here](https://github.com/SciSharp/TensorFlow.NET-Examples/blob/master/src/TensorFlowNET.Examples/BasicModels/NearestNeighbor.cs).
\ No newline at end of file
diff --git a/docs/source/NeuralNetwork.md b/docs/source/NeuralNetwork.md
new file mode 100644
index 000000000..1d46111b7
--- /dev/null
+++ b/docs/source/NeuralNetwork.md
@@ -0,0 +1,244 @@
+# Chapter. Neural Network
+
+In this chapter, we'll learn how to build a graph of a neural network model. The key advantage of a neural network compared to a linear classifier is that it can separate data which is not linearly separable. We'll implement this model to classify hand-written digit images from the MNIST dataset.
+
+
+
+The structure of the neural network we're going to build is as follows. The hand-written digit images of the MNIST data have 10 classes (from 0 to 9). The network has 2 hidden layers: the first layer with 200 hidden units (neurons) and the second one (known as the classifier layer) with 10 neurons.
+
+
+
+Get started with the implementation step by step:
+
+1. **Prepare data**
+
+    MNIST is a dataset of handwritten digits which contains 55,000 examples for training, 5,000 examples for validation and 10,000 examples for testing. The digits have been size-normalized and centered in a fixed-size image (28 x 28 pixels) with values from 0 to 1. Each image has been flattened and converted to a 1-D array of 784 features. It's also a kind of benchmark dataset for deep learning.
+
+ 
+
+    We define some variables to make it easier to modify them later. It's important to note that in a linear model, we have to flatten the input images to a vector.
+
+ ```csharp
+ using System;
+ using NumSharp;
+ using Tensorflow;
+ using TensorFlowNET.Examples.Utility;
+ using static Tensorflow.Python;
+ ```
+
+ ```csharp
+ const int img_h = 28;
+ const int img_w = 28;
+ int img_size_flat = img_h * img_w; // 784, the total number of pixels
+ int n_classes = 10; // Number of classes, one class per digit
+ ```
+
+ We'll write the function which automatically loads the MNIST data and returns it in our desired shape and format. There is an MNIST data helper to make life easier.
+
+ ```csharp
+ Datasets mnist;
+ public void PrepareData()
+ {
+ mnist = MnistDataSet.read_data_sets("mnist", one_hot: true);
+ }
+ ```
+
+ Other than a function for loading the images and corresponding labels, we still need two more functions:
+
+ **randomize**: which randomizes the order of images and their labels. At the beginning of each epoch, we will re-randomize the order of data samples to make sure that the trained model is not sensitive to the order of data.
+
+ ```csharp
+ private (NDArray, NDArray) randomize(NDArray x, NDArray y)
+ {
+ var perm = np.random.permutation(y.shape[0]);
+
+ np.random.shuffle(perm);
+ return (mnist.train.images[perm], mnist.train.labels[perm]);
+ }
+ ```
+
+ **get_next_batch**: which only selects a few number of images determined by the batch_size variable (as per Stochastic Gradient Descent method).
+
+ ```csharp
+ private (NDArray, NDArray) get_next_batch(NDArray x, NDArray y, int start, int end)
+ {
+ var x_batch = x[$"{start}:{end}"];
+ var y_batch = y[$"{start}:{end}"];
+ return (x_batch, y_batch);
+ }
+ ```
+
+2. **Set Hyperparameters**
+
+    There are about 55,000 images in the training set, and it takes a long time to calculate the gradient of the model using all these images. Therefore we use a small batch of images in each iteration of the optimizer, as per Stochastic Gradient Descent.
+
+ * epoch: one forward pass and one backward pass of all the training examples.
+ * batch size: the number of training examples in one forward/backward pass. The higher the batch size, the more memory space you'll need.
+    * iteration: one forward pass and one backward pass of one batch of training examples.
+
+ ```csharp
+ int epochs = 10;
+ int batch_size = 100;
+ float learning_rate = 0.001f;
+ int h1 = 200; // number of nodes in the 1st hidden layer
+ ```
+
+3. **Building the neural network**
+
+ Let's make some functions to help build computation graph.
+
+ **variables**: We need to define two variables `W` and `b` to construct our linear model. We use `Tensorflow Variables` of proper size and initialization to define them.
+
+ ```csharp
+ // weight_variable
+ var in_dim = x.shape[1];
+
+ var initer = tf.truncated_normal_initializer(stddev: 0.01f);
+ var W = tf.get_variable("W_" + name,
+ dtype: tf.float32,
+ shape: (in_dim, num_units),
+ initializer: initer);
+
+ // bias_variable
+ var initial = tf.constant(0f, num_units);
+ var b = tf.get_variable("b_" + name,
+ dtype: tf.float32,
+ initializer: initial);
+ ```
+
+ **fully-connected layer**: Neural network consists of stacks of fully-connected (dense) layers. Having the weight (W) and bias (b) variables, a fully-connected layer is defined as `activation(W x X + b)`. The complete `fc_layer` function is as below:
+
+ ```csharp
+ private Tensor fc_layer(Tensor x, int num_units, string name, bool use_relu = true)
+ {
+ var in_dim = x.shape[1];
+
+ var initer = tf.truncated_normal_initializer(stddev: 0.01f);
+ var W = tf.get_variable("W_" + name,
+ dtype: tf.float32,
+ shape: (in_dim, num_units),
+ initializer: initer);
+
+ var initial = tf.constant(0f, num_units);
+ var b = tf.get_variable("b_" + name,
+ dtype: tf.float32,
+ initializer: initial);
+
+ var layer = tf.matmul(x, W) + b;
+ if (use_relu)
+ layer = tf.nn.relu(layer);
+
+ return layer;
+ }
+ ```
+
+    **inputs**: Now we need to define the proper tensors to feed the input to our model. A placeholder variable is the suitable choice for the input images and corresponding labels. This allows us to change the inputs (images and labels) to the TensorFlow graph.
+
+ ```csharp
+ // Placeholders for inputs (x) and outputs(y)
+ x = tf.placeholder(tf.float32, shape: (-1, img_size_flat), name: "X");
+ y = tf.placeholder(tf.float32, shape: (-1, n_classes), name: "Y");
+ ```
+
+ Placeholder `x` is defined for the images, the shape is set to `[None, img_size_flat]`, where `None` means that the tensor may hold an arbitrary number of images with each image being a vector of length `img_size_flat`.
+
+ Placeholder `y` is the variable for the true labels associated with the images that were input in the placeholder variable `x`. It holds an arbitrary number of labels and each label is a vector of length `num_classes` which is 10.
+
+ **network layers**: After creating the proper input, we have to pass it to our model. Since we have a neural network, we can stack multiple fully-connected layers using `fc_layer` method. Note that we will not use any activation function (use_relu = false) in the last layer. The reason is that we can use `tf.nn.softmax_cross_entropy_with_logits` to calculate the loss.
+
+ ```csharp
+ // Create a fully-connected layer with h1 nodes as hidden layer
+ var fc1 = fc_layer(x, h1, "FC1", use_relu: true);
+ // Create a fully-connected layer with n_classes nodes as output layer
+ var output_logits = fc_layer(fc1, n_classes, "OUT", use_relu: false);
+ ```
+
+    **loss function**: After creating the network, we have to calculate the loss and optimize it; we also have to calculate the `correct_prediction` and `accuracy`.
+
+ ```csharp
+ // Define the loss function, optimizer, and accuracy
+ var logits = tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: output_logits);
+ loss = tf.reduce_mean(logits, name: "loss");
+ optimizer = tf.train.AdamOptimizer(learning_rate: learning_rate, name: "Adam-op").minimize(loss);
+ var correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name: "correct_pred");
+ accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name: "accuracy");
+ ```
+
+ **initialize variables**: We have to invoke a variable initializer operation to initialize all variables.
+
+ ```csharp
+ var init = tf.global_variables_initializer();
+ ```
+
+    The complete computation graph looks like below:
+
+ 
+
+4. **Train**
+
+ After creating the graph, we can train our model. To train the model, we have to create a session and run the graph in the session.
+
+ ```csharp
+ // Number of training iterations in each epoch
+ var num_tr_iter = mnist.train.labels.len / batch_size;
+ with(tf.Session(), sess =>
+ {
+ sess.run(init);
+
+ float loss_val = 100.0f;
+ float accuracy_val = 0f;
+
+ foreach (var epoch in range(epochs))
+ {
+ print($"Training epoch: {epoch + 1}");
+ // Randomly shuffle the training data at the beginning of each epoch
+ var (x_train, y_train) = randomize(mnist.train.images, mnist.train.labels);
+
+ foreach (var iteration in range(num_tr_iter))
+ {
+ var start = iteration * batch_size;
+ var end = (iteration + 1) * batch_size;
+ var (x_batch, y_batch) = get_next_batch(x_train, y_train, start, end);
+
+ // Run optimization op (backprop)
+ sess.run(optimizer, new FeedItem(x, x_batch), new FeedItem(y, y_batch));
+
+ if (iteration % display_freq == 0)
+ {
+ // Calculate and display the batch loss and accuracy
+ var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_batch), new FeedItem(y, y_batch));
+ loss_val = result[0];
+ accuracy_val = result[1];
+ print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")}");
+ }
+ }
+
+ // Run validation after every epoch
+ var results1 = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.validation.images), new FeedItem(y, mnist.validation.labels));
+ loss_val = results1[0];
+ accuracy_val = results1[1];
+ print("---------------------------------------------------------");
+ print($"Epoch: {epoch + 1}, validation loss: {loss_val.ToString("0.0000")}, validation accuracy: {accuracy_val.ToString("P")}");
+ print("---------------------------------------------------------");
+ }
+ });
+ ```
+
+5. **Test**
+
+ After the training is done, we have to test our model to see how good it performs on a new dataset.
+
+ ```csharp
+ var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.test.images), new FeedItem(y, mnist.test.labels));
+ loss_test = result[0];
+ accuracy_test = result[1];
+ print("---------------------------------------------------------");
+ print($"Test loss: {loss_test.ToString("0.0000")}, test accuracy: {accuracy_test.ToString("P")}");
+ print("---------------------------------------------------------");
+ ```
+
+ 
+
+
+
+
diff --git a/docs/source/Operation.md b/docs/source/Operation.md
new file mode 100644
index 000000000..67a25a37f
--- /dev/null
+++ b/docs/source/Operation.md
@@ -0,0 +1,3 @@
+# Chapter. Operation
+
+`Operation` represents a `Graph` node that performs computation on tensors. An operation is a `Node` in a `Graph` that takes zero or more `Tensor`s (produced by other Operations in the Graph) as input, and produces zero or more Tensors as output.
\ No newline at end of file
diff --git a/docs/source/Placeholder.md b/docs/source/Placeholder.md
new file mode 100644
index 000000000..2cf345bd0
--- /dev/null
+++ b/docs/source/Placeholder.md
@@ -0,0 +1,20 @@
+# Chapter. Placeholder
+
+In this chapter we will talk about another common data type in TensorFlow: Placeholder. It is a simplified variable whose required value can be supplied by the session when the graph is run; that is, when you build the graph, you don't need to specify the value of that variable, but instead defer it until the session begins. In TensorFlow terminology, we then feed data into the graph through these placeholders. The difference between placeholders and constants is that placeholders can specify coefficient values more flexibly without modifying the code that builds the graph. For example, mathematical constants are suitable for Constant, and some model smoothing values can be specified with Placeholder.
+
+
+
+```csharp
+var x = tf.placeholder(tf.int32);
+var y = x * 3;
+
+using (var sess = tf.Session())
+{
+ var result = sess.run(y, feed_dict: new FeedItem[]
+ {
+ new FeedItem(x, 2)
+ });
+ // (int)result should be 6;
+}
+```
+
diff --git a/docs/source/Preface.md b/docs/source/Preface.md
new file mode 100644
index 000000000..65db8c1d2
--- /dev/null
+++ b/docs/source/Preface.md
@@ -0,0 +1,15 @@
+
+
+# Preface
+
+Why do I start the TensorFlow.NET project?
+
+In a few days, it was Christmas in 2018. I watched my children grow up and be sensible every day, and I felt that time passed too fast. IT technology updates are faster than ever, and a variety of front-end technologies are emerging. Big data, Artificial Intelligence and Blockchain, Container technology and Microservices, Distributed Computing and Serverless technology are dazzling. The Amazon AI service interface claims that engineers who don't need any machine learning experience can use it, so that the idea of just calming down for two years and planning to switch to an AI architecture in the future is a splash of cold water.
+
+
+
+TensorFlow is an open source project for machine learning especially for deep learning. It's used for both research and production at Google company. It's designed according to dataflow programming pattern across a range of tasks. TensorFlow is not just a deep learning library. As long as you can represent your calculation process as a data flow diagram, you can use TensorFlow for distributed computing. TensorFlow uses a computational graph to build a computing network while operating on the graph. Users can write their own upper-level models in Python based on TensorFlow, or extend the underlying C++ custom action code to TensorFlow.
+
+
+
+In order to avoid confusion, the unique classes defined in TensorFlow are not translated in this book. For example, Tensor, Graph, Shape will retain the English name.
diff --git a/docs/source/Queue.md b/docs/source/Queue.md
new file mode 100644
index 000000000..7f137fb32
--- /dev/null
+++ b/docs/source/Queue.md
@@ -0,0 +1,157 @@
+# Chapter. Queue
+
+TensorFlow is capable of handling multiple threads, and queues are a powerful mechanism for asynchronous computation. If we have large datasets this can significantly speed up the training process of our models. This functionality is especially handy when reading, pre-processing and extracting our training data in mini-batches. The secret to being able to do professional and high performance training of our model is understanding TensorFlow queuing operations. TensorFlow has implemented 4 types of Queue: **FIFOQueue**, **PaddingFIFOQueue**, **PriorityQueue** and **RandomShuffleQueue**.
+
+
+
+Like everything in TensorFlow, a queue is a node in a computation graph. It's a stateful node, like a variable: other nodes can modify its content. In particular, nodes can enqueue new items into the queue, or dequeue existing items from the queue.
+
+To get started with queue, let's consider a simple example. We will create a "first in, first out" queue (FIFOQueue) and fill it with numbers. Then we'll construct a graph that takes an item off the queue, adds one to that item, and puts it back on the end of the queue.
+
+```csharp
+[TestMethod]
+public void FIFOQueue()
+{
+ // create a first in first out queue with capacity up to 2
+ // and data type set as int32
+ var queue = tf.FIFOQueue(2, tf.int32);
+ // init queue, push 2 elements into queue.
+ var init = queue.enqueue_many(new[] { 10, 20 });
+ // pop out the first element
+ var x = queue.dequeue();
+ // add 1
+ var y = x + 1;
+ // push back into queue
+ var inc = queue.enqueue(y);
+
+ using (var sess = tf.Session())
+ {
+ // init queue
+ init.run();
+
+ // pop out first element and push back calculated y
+ (int dequeued, _) = sess.run((x, inc));
+ Assert.AreEqual(10, dequeued);
+
+ (dequeued, _) = sess.run((x, inc));
+ Assert.AreEqual(20, dequeued);
+
+ (dequeued, _) = sess.run((x, inc));
+ Assert.AreEqual(11, dequeued);
+
+ (dequeued, _) = sess.run((x, inc));
+ Assert.AreEqual(21, dequeued);
+
+ // thread will hang or block if you run sess.run(x) again
+ // until queue has more element.
+ }
+}
+```
+
+`Enqueue`, `EnqueueMany` and `Dequeue` are special nodes. They take a pointer to the queue instead of a normal value, allowing them to change it. We first create a FIFOQueue *queue* with capacity up to 2 and enqueue two values into the *queue*. Then we immediately attempt to *dequeue* a value from it and assign it to *y*, where we simply add 1 to the dequeued variable. Next, we start up a *session* and run. After we've run this operation a few times the queue will be empty - if we try to run the operation again, the main thread of the program will hang or block - this is because it will be waiting for another operation to be run to put more values in the queue.
+
+#### FIFOQueue
+
+Creates a queue that dequeues elements in a first-in first-out order. A `FIFOQueue` has bounded capacity; supports multiple concurrent producers and consumers; and provides exactly-once delivery. A `FIFOQueue` holds a list of up to `capacity` elements. Each element is a fixed-length tuple of tensors whose dtypes are described by `dtypes`, and whose shapes are optionally described by the `shapes` argument.
+
+#### PaddingFIFOQueue
+
+A FIFOQueue that supports batching variable-sized tensors by padding. A `PaddingFIFOQueue` may contain components with dynamic shape, while also supporting `dequeue_many`. A `PaddingFIFOQueue` holds a list of up to `capacity` elements. Each element is a fixed-length tuple of tensors whose dtypes are described by `dtypes`, and whose shapes are described by the `shapes` argument.
+
+```csharp
+[TestMethod]
+public void PaddingFIFOQueue()
+{
+ var numbers = tf.placeholder(tf.int32);
+ var queue = tf.PaddingFIFOQueue(10, tf.int32, new TensorShape(-1));
+ var enqueue = queue.enqueue(numbers);
+ var dequeue_many = queue.dequeue_many(n: 3);
+
+ using(var sess = tf.Session())
+ {
+ sess.run(enqueue, (numbers, new[] { 1 }));
+ sess.run(enqueue, (numbers, new[] { 2, 3 }));
+ sess.run(enqueue, (numbers, new[] { 3, 4, 5 }));
+
+ var result = sess.run(dequeue_many[0]);
+
+ Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 1, 0, 0 }, result[0].ToArray()));
+ Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 2, 3, 0 }, result[1].ToArray()));
+ Assert.IsTrue(Enumerable.SequenceEqual(new int[] { 3, 4, 5 }, result[2].ToArray()));
+ }
+}
+```
+
+
+
+#### PriorityQueue
+
+A queue implementation that dequeues elements in prioritized order. A `PriorityQueue` has bounded capacity; supports multiple concurrent producers and consumers; and provides exactly-once delivery. A `PriorityQueue` holds a list of up to `capacity` elements. Each element is a fixed-length tuple of tensors whose dtypes are described by `types`, and whose shapes are optionally described by the `shapes` argument.
+
+```csharp
+[TestMethod]
+public void PriorityQueue()
+{
+ var queue = tf.PriorityQueue(3, tf.@string);
+ var init = queue.enqueue_many(new[] { 2L, 4L, 3L }, new[] { "p1", "p2", "p3" });
+ var x = queue.dequeue();
+
+ using (var sess = tf.Session())
+ {
+ init.run();
+
+ // output will 2, 3, 4
+ var result = sess.run(x);
+ Assert.AreEqual(result[0].GetInt64(), 2L);
+
+ result = sess.run(x);
+ Assert.AreEqual(result[0].GetInt64(), 3L);
+
+ result = sess.run(x);
+ Assert.AreEqual(result[0].GetInt64(), 4L);
+ }
+}
+```
+
+
+
+#### RandomShuffleQueue
+
+A queue implementation that dequeues elements in a random order. A `RandomShuffleQueue` has bounded capacity; supports multiple concurrent producers and consumers; and provides exactly-once delivery. A `RandomShuffleQueue` holds a list of up to `capacity` elements. Each element is a fixed-length tuple of tensors whose dtypes are described by `dtypes`, and whose shapes are optionally described by the `shapes` argument.
+
+```csharp
+[TestMethod]
+public void RandomShuffleQueue()
+{
+ var queue = tf.RandomShuffleQueue(10, min_after_dequeue: 1, dtype: tf.int32);
+ var init = queue.enqueue_many(new[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 });
+ var x = queue.dequeue();
+
+ string results = "";
+ using (var sess = tf.Session())
+ {
+ init.run();
+
+ foreach(var i in range(9))
+ results += (int)sess.run(x) + ".";
+
+ // output in random order
+ // 1.2.3.4.5.6.7.8.9.
+ }
+}
+```
+
+
+
+Queue methods must run on the same device as the queue. `FIFOQueue` and `RandomShuffleQueue` are important TensorFlow objects for computing tensor asynchronously in a graph. For example, a typical input architecture is to use a `RandomShuffleQueue` to prepare inputs for training a model:
+
+* Multiple threads prepare training examples and push them in the queue.
+* A training thread executes a training op that dequeues mini-batches from the queue.
+
+This architecture simplifies the construction of input pipelines.
+
+
+
+From the above example, once the output gets to the point above you’ll actually have to terminate the program as it is blocked. Now, this isn’t very useful. What we really want to happen is for our little program to reload or enqueue more values whenever our queue is empty or is about to become empty. We could fix this by explicitly running our *enqueue_op* again in the code above to reload our queue with values. However, for large, more realistic programs, this will become unwieldy. Thankfully, TensorFlow has a solution.
+
+TensorFlow provides two classes to help with multi-threading tasks: `tf.Coordinator` and `tf.QueueRunner`. These two classes are designed to be used together. The `Coordinator` class helps multiple threads stop together and report exceptions to a main thread. The `QueueRunner` class is used to create a number of threads cooperating to enqueue tensors in the same queue.
diff --git a/docs/source/Session.md b/docs/source/Session.md
new file mode 100644
index 000000000..d6f249048
--- /dev/null
+++ b/docs/source/Session.md
@@ -0,0 +1,29 @@
+# Chapter. Session
+
+A TensorFlow **session** runs parts of the graph across a set of local and remote devices. A session allows us to execute graphs or parts of graphs. It allocates resources (on one or more machines) for that and holds the actual values of intermediate results and variables.
+
+
+
+### Running Computations in a Session
+
+Let's complete the example in last chapter. To run any of the operations, we need to create a session for that graph. The session will also allocate memory to store the current value of the variable.
+
+
+
+```csharp
+with(tf.Graph(), graph =>
+{
+ var variable = tf.Variable(31, name: "tree");
+ var init = tf.global_variables_initializer();
+
+ var sess = tf.Session(graph);
+ sess.run(init);
+
+ var result = sess.run(variable); // 31
+
+ var assign = variable.assign(12);
+ result = sess.run(assign); // 12
+});
+```
+
+The value of our variables is only valid within one session. If we try to get the value in another session, TensorFlow will raise an error of `Attempting to use uninitialized value foo`. Of course, we can use the graph in more than one session, because the session copies the graph definition to a new memory area. We just have to initialize the variables again. The values in the new session will be completely independent from the previous one.
diff --git a/docs/source/Table of Contents.md b/docs/source/Table of Contents.md
new file mode 100644
index 000000000..b28505cc8
--- /dev/null
+++ b/docs/source/Table of Contents.md
@@ -0,0 +1,44 @@
+# Table of Contents
+
+### Foreword...........................................................................................xxi
+
+### Preface..............................................................................................xxiii
+
+## Part I. Getting Started
+
+##### 1. You Know, for Machine Learning............................................................................................ 3
+
+ Installing Tensorflow.NET
+ Running Tensorflow.NET
+ Talking to Tensorflow.NET
+
+##### 2. Hello World
+
+
+
+## Part II. Tensorflow.NET in Depth
+
+##### 1. Control Dependency ......................................................................................................
+
+##### 2. Graph ....................................
+
+##### 3. Session ............................
+
+
+
+## Part III. Dealing with Human Language
+
+##### 1. Text Classification ............................................................................................
+
+##### 2. Named Entity Recognition ..............................................................................
+
+##### 3. Sentiment Analyze ...........................................................................................
+
+##### 4. Sentence Dependency ........................................................................
+
+
+
+## Part IV. Image Recognition
+
+##### 1. Inception Model ................................................................................................................. 100
+
diff --git a/docs/source/Tensor.md b/docs/source/Tensor.md
new file mode 100644
index 000000000..aefb884f7
--- /dev/null
+++ b/docs/source/Tensor.md
@@ -0,0 +1,52 @@
+# Chapter 1. Tensor
+
+### Represents one of the outputs of an Operation
+
+
+
+##### What is Tensor?
+
+Tensor holds a multi-dimensional array of elements of a single data type which is very similar to `NumPy`'s `ndarray`. When the dimension is zero, it can be called a scalar. When the dimension is 2, it can be called a matrix. When the dimension is greater than 2, it is usually called a tensor. If you are very familiar with `NumPy`, then understanding Tensor will be quite easy.
+
+
+
+##### How to create a Tensor?
+
+There are many ways to initialize a Tensor object in TF.NET. It can be initialized from a scalar, string, matrix or tensor. But the best way to create a Tensor is using high level APIs like `tf.constant`, `tf.zeros` and `tf.ones`. We'll talk about constant more detail in next chapter.
+
+```csharp
+// Create a tensor holds a scalar value
+var t1 = new Tensor(3);
+
+// Init from a string
+var t2 = new Tensor("Hello! TensorFlow.NET");
+
+// Tensor holds a ndarray
+var nd = new NDArray(new int[]{3, 1, 1, 2});
+var t3 = new Tensor(nd);
+
+Console.WriteLine($"t1: {t1}, t2: {t2}, t3: {t3}");
+```
+
+
+
+##### Data Structure of Tensor
+
+TF uses column-major order. If we use NumSharp to generate a 2 x 3 matrix and access the data from index 0 to 5 in order, we won't get the numbers 1 to 6 in sequence; instead we get them in the order 1, 4, 2, 5, 3, 6.
+
+```csharp
+// Generate a matrix:[[1, 2, 3], [4, 5, 6]]
+var nd = np.array(1f, 2f, 3f, 4f, 5f, 6f).reshape(2, 3);
+// The index will be 0 2 4 1 3 5, it's column-major order.
+```
+
+
+
+
+
+
+
+##### Index/ Slice of Tensor
+
+Tensor element can be accessed by `index` and `slice` related operations. Through some high level APIs, we can easily access specific dimension's data.
+
diff --git a/docs/source/Train.md b/docs/source/Train.md
new file mode 100644
index 000000000..85d441ba0
--- /dev/null
+++ b/docs/source/Train.md
@@ -0,0 +1,12 @@
+# Chapter. Trainer
+
+### Saver
+
+The `tf.train.saver` class provides methods to save and restore models.
+
+
+
+### Saver Builder
+
+##### Bulk Saver Builder
+
diff --git a/docs/source/Variable.md b/docs/source/Variable.md
new file mode 100644
index 000000000..c4f6a6af6
--- /dev/null
+++ b/docs/source/Variable.md
@@ -0,0 +1,18 @@
+# Chapter. Variable
+
+The variables in TensorFlow are mainly used to represent variable parameter values in the machine learning model. Variables can be initialized by the `tf.Variable` function. During the graph computation the variables are modified by other operations. Variables exist in the session, as long as they are in the same session, other computing nodes on the network can access the same variable value. Variables use lazy loading and will only request memory space when they are used.
+
+
+
+```csharp
+var x = tf.Variable(10, name: "x");
+using (var session = tf.Session())
+{
+ session.run(x.initializer);
+ var result = session.run(x);
+ Console.Write(result); // should be 10
+}
+```
+
+The above code first creates a variable operation, initializes the variable, then runs the session, and finally gets the result. This code is very simple, but it shows the complete process of how TensorFlow operates on variables. When creating a variable, you pass a `tensor` as the initial value to the function `Variable()`. TensorFlow provides a series of operators to initialize the tensor; the initial value is a constant or a random value.
+
diff --git a/docs/source/_static/FIFOQueue-example.jpg b/docs/source/_static/FIFOQueue-example.jpg
new file mode 100644
index 000000000..ac2749346
Binary files /dev/null and b/docs/source/_static/FIFOQueue-example.jpg differ
diff --git a/docs/The-Definitive-Guide/assets/column-major-order.png b/docs/source/_static/column-major-order.png
similarity index 100%
rename from docs/The-Definitive-Guide/assets/column-major-order.png
rename to docs/source/_static/column-major-order.png
diff --git a/docs/source/_static/constant/n-index-formula-offset.svg b/docs/source/_static/constant/n-index-formula-offset.svg
new file mode 100644
index 000000000..6c5a3219c
--- /dev/null
+++ b/docs/source/_static/constant/n-index-formula-offset.svg
@@ -0,0 +1,41 @@
+
+
+
\ No newline at end of file
diff --git a/docs/source/_static/constant/n-index-formula.svg b/docs/source/_static/constant/n-index-formula.svg
new file mode 100644
index 000000000..5d05c06f0
--- /dev/null
+++ b/docs/source/_static/constant/n-index-formula.svg
@@ -0,0 +1,33 @@
+
+
+
\ No newline at end of file
diff --git a/docs/source/_static/contiguous-block-of-memory-ndarray-example-1.png b/docs/source/_static/contiguous-block-of-memory-ndarray-example-1.png
new file mode 100644
index 000000000..140e37716
Binary files /dev/null and b/docs/source/_static/contiguous-block-of-memory-ndarray-example-1.png differ
diff --git a/docs/source/_static/contiguous-block-of-memory.png b/docs/source/_static/contiguous-block-of-memory.png
new file mode 100644
index 000000000..44d3ab62f
Binary files /dev/null and b/docs/source/_static/contiguous-block-of-memory.png differ
diff --git a/docs/source/_static/front-cover.jpg b/docs/source/_static/front-cover.jpg
new file mode 100644
index 000000000..3452f8001
Binary files /dev/null and b/docs/source/_static/front-cover.jpg differ
diff --git a/docs/source/_static/gradient-descent.png b/docs/source/_static/gradient-descent.png
new file mode 100644
index 000000000..3fcde528d
Binary files /dev/null and b/docs/source/_static/gradient-descent.png differ
diff --git a/docs/source/_static/linear-regression-tensor-board.png b/docs/source/_static/linear-regression-tensor-board.png
new file mode 100644
index 000000000..ea9304a02
Binary files /dev/null and b/docs/source/_static/linear-regression-tensor-board.png differ
diff --git a/docs/source/_static/logistic-regression/1557035393445.png b/docs/source/_static/logistic-regression/1557035393445.png
new file mode 100644
index 000000000..a9ca67a8b
Binary files /dev/null and b/docs/source/_static/logistic-regression/1557035393445.png differ
diff --git a/docs/source/_static/minimize-square-cost.png b/docs/source/_static/minimize-square-cost.png
new file mode 100644
index 000000000..229a4cf53
Binary files /dev/null and b/docs/source/_static/minimize-square-cost.png differ
diff --git a/docs/source/_static/new-project-console.png b/docs/source/_static/new-project-console.png
new file mode 100644
index 000000000..d4bfbc68c
Binary files /dev/null and b/docs/source/_static/new-project-console.png differ
diff --git a/docs/source/_static/new-project.png b/docs/source/_static/new-project.png
new file mode 100644
index 000000000..789b5f1fd
Binary files /dev/null and b/docs/source/_static/new-project.png differ
diff --git a/docs/source/_static/regression-dataset.png b/docs/source/_static/regression-dataset.png
new file mode 100644
index 000000000..0cd46f46e
Binary files /dev/null and b/docs/source/_static/regression-dataset.png differ
diff --git a/docs/The-Definitive-Guide/assets/row-major-order.png b/docs/source/_static/row-major-order.png
similarity index 100%
rename from docs/The-Definitive-Guide/assets/row-major-order.png
rename to docs/source/_static/row-major-order.png
diff --git a/docs/source/_static/sigmoid.png b/docs/source/_static/sigmoid.png
new file mode 100644
index 000000000..4321a1a41
Binary files /dev/null and b/docs/source/_static/sigmoid.png differ
diff --git a/docs/source/_static/tensor-constant-ndarray.png b/docs/source/_static/tensor-constant-ndarray.png
new file mode 100644
index 000000000..3610ee0cd
Binary files /dev/null and b/docs/source/_static/tensor-constant-ndarray.png differ
diff --git a/docs/source/_static/tensor-naming.png b/docs/source/_static/tensor-naming.png
new file mode 100644
index 000000000..7b1d408b9
Binary files /dev/null and b/docs/source/_static/tensor-naming.png differ
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 000000000..68b6f5aee
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+#
+# Configuration file for the Sphinx documentation builder.
+#
+# This file does only contain a selection of the most common options. For a
+# full list see the documentation:
+# http://www.sphinx-doc.org/en/master/config
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+# import os
+# import sys
+# sys.path.insert(0, os.path.abspath('.'))
+
+
+# -- Project information -----------------------------------------------------
+
+project = 'TensorFlow.NET'
+copyright = '2019, Haiping Chen'
+author = 'Haiping Chen'
+
+# The short X.Y version
+version = '0.6.0'
+# The full version, including alpha/beta/rc tags
+release = '0.6.0'
+
+
+# -- General configuration ---------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+#
+# needs_sphinx = '1.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ 'sphinx.ext.mathjax',
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+# source_suffix = ['.rst', '.md']
+from recommonmark.parser import CommonMarkParser
+source_parsers = {'.md': CommonMarkParser}
+source_suffix = ['.rst', '.md']
+
+# The master toctree document.
+master_doc = 'index'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = []
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'sphinx_rtd_theme'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+#
+# html_theme_options = {}
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Custom sidebar templates, must be a dictionary that maps document names
+# to template names.
+#
+# The default sidebars (for documents that don't match any pattern) are
+# defined by theme itself. Builtin themes are using these templates by
+# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
+# 'searchbox.html']``.
+#
+# html_sidebars = {}
+
+
+# -- Options for HTMLHelp output ---------------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'TensorFlowNETdoc'
+
+
+# -- Options for LaTeX output ------------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ #
+ # 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ #
+ # 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ #
+ # 'preamble': '',
+
+ # Latex figure (float) alignment
+ #
+ # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, 'TensorFlowNET.tex', 'TensorFlow.NET Documentation',
+ 'Haiping Chen', 'manual'),
+]
+
+
+# -- Options for manual page output ------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, 'tensorflownet', 'TensorFlow.NET Documentation',
+ [author], 1)
+]
+
+
+# -- Options for Texinfo output ----------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, 'TensorFlowNET', 'TensorFlow.NET Documentation',
+ author, 'TensorFlowNET', 'One line description of project.',
+ 'Miscellaneous'),
+]
+
+
+# -- Options for Epub output -------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = project
+
+# The unique identifier of the text. This can be a ISBN number
+# or the project homepage.
+#
+# epub_identifier = ''
+
+# A unique identification for the text.
+#
+# epub_uid = ''
+
+# A list of files that should not be packed into the epub file.
+epub_exclude_files = ['search.html']
+
+
+# -- Extension configuration -------------------------------------------------
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 000000000..61f0d752e
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,34 @@
+.. TensorFlow.NET documentation master file, created by
+ sphinx-quickstart on Sat Jan 5 09:26:55 2019.
+ You can adapt this file completely to your liking, but it should at least
+ contain the root `toctree` directive.
+
+Welcome to TensorFlow.NET's documentation!
+==========================================
+
+
+.. toctree::
+ :maxdepth: 3
+ :caption: The Definitive Guide to TensorFlow.NET
+
+ FrontCover
+ Foreword
+ Preface
+ HelloWorld
+ Tensor
+ Constant
+ Variable
+ Placeholder
+ Graph
+ Session
+ Operation
+ Queue
+ Gradient
+ Train
+ EagerMode
+ LinearRegression
+ LogisticRegression
+ NearestNeighbor
+ ImageRecognition
+ NeuralNetwork
+ ConvolutionNeuralNetwork
\ No newline at end of file
diff --git a/graph/InceptionV3.meta b/graph/InceptionV3.meta
new file mode 100644
index 000000000..fe220cce1
Binary files /dev/null and b/graph/InceptionV3.meta differ
diff --git a/graph/README.md b/graph/README.md
new file mode 100644
index 000000000..491e2374a
--- /dev/null
+++ b/graph/README.md
@@ -0,0 +1 @@
+These are models built with the original tensorflow library. They can be imported in TensorFlow.NET and trained. See the examples for how to do so.
\ No newline at end of file
diff --git a/graph/att_rnn_untrained.meta b/graph/att_rnn_untrained.meta
new file mode 100644
index 000000000..438e37aeb
Binary files /dev/null and b/graph/att_rnn_untrained.meta differ
diff --git a/graph/char_cnn_untrained.meta b/graph/char_cnn_untrained.meta
new file mode 100644
index 000000000..1e99f99b6
Binary files /dev/null and b/graph/char_cnn_untrained.meta differ
diff --git a/graph/cond_test.meta b/graph/cond_test.meta
new file mode 100644
index 000000000..2110d5770
Binary files /dev/null and b/graph/cond_test.meta differ
diff --git a/graph/kmeans.meta b/graph/kmeans.meta
new file mode 100644
index 000000000..0ad4f03f2
Binary files /dev/null and b/graph/kmeans.meta differ
diff --git a/graph/lstm_crf_ner.meta b/graph/lstm_crf_ner.meta
new file mode 100644
index 000000000..19a267e29
Binary files /dev/null and b/graph/lstm_crf_ner.meta differ
diff --git a/graph/rcnn_untrained.meta b/graph/rcnn_untrained.meta
new file mode 100644
index 000000000..1cd86abba
Binary files /dev/null and b/graph/rcnn_untrained.meta differ
diff --git a/graph/vd_cnn.meta b/graph/vd_cnn.meta
new file mode 100644
index 000000000..b857fc6c5
Binary files /dev/null and b/graph/vd_cnn.meta differ
diff --git a/graph/word2vec.meta b/graph/word2vec.meta
new file mode 100644
index 000000000..df120b7f8
Binary files /dev/null and b/graph/word2vec.meta differ
diff --git a/graph/word_cnn.meta b/graph/word_cnn.meta
new file mode 100644
index 000000000..141947b19
Binary files /dev/null and b/graph/word_cnn.meta differ
diff --git a/graph/word_cnn_untrained.meta b/graph/word_cnn_untrained.meta
new file mode 100644
index 000000000..a29a33a0a
Binary files /dev/null and b/graph/word_cnn_untrained.meta differ
diff --git a/graph/word_rnn_untrained.meta b/graph/word_rnn_untrained.meta
new file mode 100644
index 000000000..a5c749a9b
Binary files /dev/null and b/graph/word_rnn_untrained.meta differ
diff --git a/graph/xor.meta b/graph/xor.meta
new file mode 100644
index 000000000..f466e49af
Binary files /dev/null and b/graph/xor.meta differ
diff --git a/redist/SciSharp.TensorFlow-Cpu.Redist/SciSharp.TensorFlow-Cpu.Redist.csproj b/redist/SciSharp.TensorFlow-Cpu.Redist/SciSharp.TensorFlow-Cpu.Redist.csproj
new file mode 100644
index 000000000..4d0fa1f0e
--- /dev/null
+++ b/redist/SciSharp.TensorFlow-Cpu.Redist/SciSharp.TensorFlow-Cpu.Redist.csproj
@@ -0,0 +1,63 @@
+
+
+
+ netstandard2.0
+ win-x64;linux-x64
+ SciSharp.Tensorflow-Cpu.Redist
+
+ SciSharp.Tensorflow-Cpu.Redist
+ 1.0.0
+ SciSharp team
+ SciSharp STACK
+ https://github.com/SciSharp/TensorFlow.NET
+ git
+
+ Meta-package for CPU TensorFlow library runtime distribution.
+ Libraries can be directly downloaded from https://storage.googleapis.com/tensorflow/libtensorflow/
+
+ Apache-2.0
+
+ https://github.com/SciSharp/TensorFlow.NET
+ native;tensorflow;machine-learning;ML
+ ../../packages
+ false
+
+ false
+ false
+ false
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ../../packages;$(RestoreSources);https://api.nuget.org/v3/index.json
+
+
+
+
+
+
+
+
+
+
+
+
+ runtime.json
+ true
+ PreserveNewest
+
+
+
+
diff --git a/redist/SciSharp.TensorFlow-Cpu.Redist/runtime.json b/redist/SciSharp.TensorFlow-Cpu.Redist/runtime.json
new file mode 100644
index 000000000..a7a39cb52
--- /dev/null
+++ b/redist/SciSharp.TensorFlow-Cpu.Redist/runtime.json
@@ -0,0 +1,14 @@
+{
+ "runtimes": {
+ "linux-x64": {
+ "SciSharp.TensorFlow-Gpu.Redist": {
+ "runtime.linux-x64.SciSharp.Tensorflow-Cpu.Redist": "1.0.0"
+ }
+ },
+ "win-x64": {
+ "SciSharp.TensorFlow-Gpu.Redist": {
+ "runtime.win-x64.SciSharp.Tensorflow-Cpu.Redist": "1.0.0"
+ }
+ }
+ }
+}
diff --git a/redist/SciSharp.TensorFlow-Gpu.Redist/SciSharp.TensorFlow-Gpu.Redist.csproj b/redist/SciSharp.TensorFlow-Gpu.Redist/SciSharp.TensorFlow-Gpu.Redist.csproj
new file mode 100644
index 000000000..61ea992ed
--- /dev/null
+++ b/redist/SciSharp.TensorFlow-Gpu.Redist/SciSharp.TensorFlow-Gpu.Redist.csproj
@@ -0,0 +1,81 @@
+
+
+
+ Library
+ netstandard2.0
+
+ win-x64;linux-x64
+ SciSharp.Tensorflow-Gpu.Redist
+
+ SciSharp.Tensorflow-Gpu.Redist
+ 1.0.0
+ SciSharp team
+ SciSharp STACK
+ https://github.com/SciSharp/TensorFlow.NET
+ git
+
+ Meta-package for GPU TensorFlow library runtime distribution.
+ Libraries can be directly downloaded from https://storage.googleapis.com/tensorflow/libtensorflow/
+
+ Apache-2.0
+
+ https://github.com/SciSharp/TensorFlow.NET
+ native;tensorflow;machine-learning;ML
+ ../../packages
+ false
+
+ false
+ false
+ false
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ ../../packages;$(RestoreSources);https://api.nuget.org/v3/index.json
+
+
+
+
+
+
+
+
+
+
+
+
+ runtime.json
+ true
+ PreserveNewest
+
+
+
+
diff --git a/redist/SciSharp.TensorFlow-Gpu.Redist/runtime.json b/redist/SciSharp.TensorFlow-Gpu.Redist/runtime.json
new file mode 100644
index 000000000..392dc3ccf
--- /dev/null
+++ b/redist/SciSharp.TensorFlow-Gpu.Redist/runtime.json
@@ -0,0 +1,14 @@
+{
+ "runtimes": {
+ "linux-x64": {
+ "SciSharp.TensorFlow-Gpu.Redist": {
+ "runtime.linux-x64.SciSharp.Tensorflow-Gpu.Redist": "1.0.0"
+ }
+ },
+ "win-x64": {
+ "SciSharp.TensorFlow-Gpu.Redist": {
+ "runtime.win-x64.SciSharp.Tensorflow-Gpu.Redist": "1.0.0"
+ }
+ }
+ }
+}
diff --git a/redist/TensorFlow.NET.Redist.sln b/redist/TensorFlow.NET.Redist.sln
new file mode 100644
index 000000000..a21dc9dc9
--- /dev/null
+++ b/redist/TensorFlow.NET.Redist.sln
@@ -0,0 +1,60 @@
+
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio Version 16
+VisualStudioVersion = 16.0.29102.190
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{1E65784D-C976-4DFF-991A-DD5C57FFC8E2}"
+ ProjectSection(SolutionItems) = preProject
+ scripts\Copy-NativeTensorFlowLibs.ps1 = scripts\Copy-NativeTensorFlowLibs.ps1
+ EndProjectSection
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "runtime.linux-x64.SciSharp.TensorFlow-Cpu.Redist", "runtime.linux-x64.SciSharp.TensorFlow-Cpu.Redist\runtime.linux-x64.SciSharp.TensorFlow-Cpu.Redist.csproj", "{9834D2B4-01BF-4D18-8DCF-F498AC481FE7}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "runtime.linux-x64.SciSharp.TensorFlow-Gpu.Redist", "runtime.linux-x64.SciSharp.TensorFlow-Gpu.Redist\runtime.linux-x64.SciSharp.TensorFlow-Gpu.Redist.csproj", "{9D853997-3143-4F87-B995-7D7024CF4E1A}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "runtime.win-x64.SciSharp.TensorFlow-Cpu.Redist", "runtime.win-x64.SciSharp.TensorFlow-Cpu.Redist\runtime.win-x64.SciSharp.TensorFlow-Cpu.Redist.csproj", "{878C1EE4-B945-41BF-98DE-C4747C28022A}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist", "runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist\runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist.csproj", "{744A3D51-CEF6-4685-B4C3-718FA61143A0}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SciSharp.TensorFlow-Cpu.Redist", "SciSharp.TensorFlow-Cpu.Redist\SciSharp.TensorFlow-Cpu.Redist.csproj", "{0A281E9C-6E3D-4172-84BA-2B5F6E9F4D5B}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "SciSharp.TensorFlow-Gpu.Redist", "SciSharp.TensorFlow-Gpu.Redist\SciSharp.TensorFlow-Gpu.Redist.csproj", "{1910BE36-82E3-4465-B3B1-788BFD252DB7}"
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Release|Any CPU = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {9834D2B4-01BF-4D18-8DCF-F498AC481FE7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {9834D2B4-01BF-4D18-8DCF-F498AC481FE7}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {9834D2B4-01BF-4D18-8DCF-F498AC481FE7}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {9834D2B4-01BF-4D18-8DCF-F498AC481FE7}.Release|Any CPU.Build.0 = Release|Any CPU
+ {9D853997-3143-4F87-B995-7D7024CF4E1A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {9D853997-3143-4F87-B995-7D7024CF4E1A}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {9D853997-3143-4F87-B995-7D7024CF4E1A}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {9D853997-3143-4F87-B995-7D7024CF4E1A}.Release|Any CPU.Build.0 = Release|Any CPU
+ {878C1EE4-B945-41BF-98DE-C4747C28022A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {878C1EE4-B945-41BF-98DE-C4747C28022A}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {878C1EE4-B945-41BF-98DE-C4747C28022A}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {878C1EE4-B945-41BF-98DE-C4747C28022A}.Release|Any CPU.Build.0 = Release|Any CPU
+ {744A3D51-CEF6-4685-B4C3-718FA61143A0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {744A3D51-CEF6-4685-B4C3-718FA61143A0}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {744A3D51-CEF6-4685-B4C3-718FA61143A0}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {744A3D51-CEF6-4685-B4C3-718FA61143A0}.Release|Any CPU.Build.0 = Release|Any CPU
+ {0A281E9C-6E3D-4172-84BA-2B5F6E9F4D5B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {0A281E9C-6E3D-4172-84BA-2B5F6E9F4D5B}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {0A281E9C-6E3D-4172-84BA-2B5F6E9F4D5B}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {0A281E9C-6E3D-4172-84BA-2B5F6E9F4D5B}.Release|Any CPU.Build.0 = Release|Any CPU
+ {1910BE36-82E3-4465-B3B1-788BFD252DB7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+ {1910BE36-82E3-4465-B3B1-788BFD252DB7}.Debug|Any CPU.Build.0 = Debug|Any CPU
+ {1910BE36-82E3-4465-B3B1-788BFD252DB7}.Release|Any CPU.ActiveCfg = Release|Any CPU
+ {1910BE36-82E3-4465-B3B1-788BFD252DB7}.Release|Any CPU.Build.0 = Release|Any CPU
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(ExtensibilityGlobals) = postSolution
+ SolutionGuid = {CD7D5F34-42AE-4CCB-BDFA-1619B3A84708}
+ EndGlobalSection
+EndGlobal
diff --git a/redist/runtime.linux-x64.SciSharp.TensorFlow-Cpu.Redist/runtime.linux-x64.SciSharp.TensorFlow-Cpu.Redist.csproj b/redist/runtime.linux-x64.SciSharp.TensorFlow-Cpu.Redist/runtime.linux-x64.SciSharp.TensorFlow-Cpu.Redist.csproj
new file mode 100644
index 000000000..ea6d4186d
--- /dev/null
+++ b/redist/runtime.linux-x64.SciSharp.TensorFlow-Cpu.Redist/runtime.linux-x64.SciSharp.TensorFlow-Cpu.Redist.csproj
@@ -0,0 +1,39 @@
+
+
+
+ netstandard2.0
+ linux-x64
+ SciSharp.Tensorflow-Cpu.Redist
+
+ runtime.linux-x64.SciSharp.Tensorflow-Cpu.Redist
+ 1.0.0
+ SciSharp team
+ SciSharp STACK
+ https://github.com/SciSharp/TensorFlow.NET
+ git
+
+ Distribution of the Linux CPU TensorFlow library.
+ The libraries can be directly downloaded from https://storage.googleapis.com/tensorflow/libtensorflow/
+
+ Apache-2.0
+
+ https://github.com/SciSharp/TensorFlow.NET
+ native;tensorflow;machine-learning;ML
+ ../../packages
+ false
+
+ false
+ false
+ false
+
+
+
+
+
+ runtimes/$(RuntimeIdentifier)/native/%(Filename)%(Extension)
+ true
+ PreserveNewest
+
+
+
+
diff --git a/redist/runtime.linux-x64.SciSharp.TensorFlow-Gpu.Redist/runtime.linux-x64.SciSharp.TensorFlow-Gpu.Redist.csproj b/redist/runtime.linux-x64.SciSharp.TensorFlow-Gpu.Redist/runtime.linux-x64.SciSharp.TensorFlow-Gpu.Redist.csproj
new file mode 100644
index 000000000..d680f38a6
--- /dev/null
+++ b/redist/runtime.linux-x64.SciSharp.TensorFlow-Gpu.Redist/runtime.linux-x64.SciSharp.TensorFlow-Gpu.Redist.csproj
@@ -0,0 +1,40 @@
+
+
+
+ Library
+ netstandard2.0
+ linux-x64
+ SciSharp.Tensorflow-Gpu.Redist
+
+ runtime.linux-x64.SciSharp.Tensorflow-Gpu.Redist
+ 1.0.0
+ SciSharp team
+ SciSharp STACK
+ https://github.com/SciSharp/TensorFlow.NET
+ git
+
+ Distribution of the Linux GPU TensorFlow library.
+ Dll can be directly downloaded from https://storage.googleapis.com/tensorflow/libtensorflow/
+
+ Apache-2.0
+
+ https://github.com/SciSharp/TensorFlow.NET
+ native;tensorflow;machine-learning;ML
+ ../../packages
+ false
+
+ false
+ false
+ false
+
+
+
+
+
+ runtimes/$(RuntimeIdentifier)/native/%(Filename)%(Extension)
+ true
+ PreserveNewest
+
+
+
+
\ No newline at end of file
diff --git a/redist/runtime.win-x64.SciSharp.TensorFlow-Cpu.Redist/runtime.win-x64.SciSharp.TensorFlow-Cpu.Redist.csproj b/redist/runtime.win-x64.SciSharp.TensorFlow-Cpu.Redist/runtime.win-x64.SciSharp.TensorFlow-Cpu.Redist.csproj
new file mode 100644
index 000000000..19e7854cb
--- /dev/null
+++ b/redist/runtime.win-x64.SciSharp.TensorFlow-Cpu.Redist/runtime.win-x64.SciSharp.TensorFlow-Cpu.Redist.csproj
@@ -0,0 +1,39 @@
+
+
+
+ netstandard2.0
+ win-x64
+ SciSharp.Tensorflow-Cpu.Redist
+
+ runtime.win-x64.SciSharp.Tensorflow-Cpu.Redist
+ 1.0.0
+ SciSharp team
+ SciSharp STACK
+ https://github.com/SciSharp/TensorFlow.NET
+ git
+
+ Distribution of the Windows CPU TensorFlow library.
+ The libraries can be directly downloaded from https://storage.googleapis.com/tensorflow/libtensorflow/
+
+ Apache-2.0
+
+ https://github.com/SciSharp/TensorFlow.NET
+ native;tensorflow;machine-learning;ML
+ ../../packages
+ false
+
+ false
+ false
+ false
+
+
+
+
+
+ runtimes/$(RuntimeIdentifier)/native/%(Filename)%(Extension)
+ true
+ PreserveNewest
+
+
+
+
diff --git a/redist/runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist/.gitignore b/redist/runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist/.gitignore
new file mode 100644
index 000000000..fca132d9b
--- /dev/null
+++ b/redist/runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist/.gitignore
@@ -0,0 +1 @@
+tensorflow.dll
diff --git a/redist/runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist/runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist.csproj b/redist/runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist/runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist.csproj
new file mode 100644
index 000000000..915e0e2a5
--- /dev/null
+++ b/redist/runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist/runtime.win-x64.SciSharp.TensorFlow-Gpu.Redist.csproj
@@ -0,0 +1,40 @@
+
+
+
+ Library
+ netstandard2.0
+ win-x64
+ SciSharp.Tensorflow-Gpu.Redist
+
+ runtime.win-x64.SciSharp.Tensorflow-Gpu.Redist
+ 1.0.0
+ SciSharp team
+ SciSharp STACK
+ https://github.com/SciSharp/TensorFlow.NET
+ git
+
+ Distribution of the Windows GPU TensorFlow library.
+ Dll can be directly downloaded from https://storage.googleapis.com/tensorflow/libtensorflow/
+
+ Apache-2.0
+
+ https://github.com/SciSharp/TensorFlow.NET
+ native;tensorflow;machine-learning;ML
+ ../../packages
+ false
+
+ false
+ false
+ false
+
+
+
+
+
+ runtimes/$(RuntimeIdentifier)/native/%(Filename)%(Extension)
+ true
+ PreserveNewest
+
+
+
+
\ No newline at end of file
diff --git a/src/SciSharp.TensorFlow.Redist/CommonPackage.props b/src/SciSharp.TensorFlow.Redist/CommonPackage.props
new file mode 100644
index 000000000..08fbb153a
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/CommonPackage.props
@@ -0,0 +1,24 @@
+
+
+
+
+
+ PreserveNewest
+ false
+ %(Filename)%(Extension)
+
+
+ PreserveNewest
+ false
+ %(Filename)%(Extension)
+
+
+
+
\ No newline at end of file
diff --git a/src/SciSharp.TensorFlow.Redist/README.md b/src/SciSharp.TensorFlow.Redist/README.md
new file mode 100644
index 000000000..4002aa21d
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/README.md
@@ -0,0 +1,40 @@
+## SciSharp.TensorFlow.Redist ##
+
+
+`SciSharp.TensorFlow.Redist` is a migration from [Microsoft.ML.TensorFlow.Redist](https://github.com/dotnet/machinelearning/tree/release/1.2/src/Redist/Microsoft.ML.TensorFlow.Redist). The [ML.NET](https://github.com/dotnet/machinelearning) team will no longer maintain the package from [ML.NET](https://www.nuget.org/packages/Microsoft.ML) v1.3.0 onward.
+
+* CPU version for all platforms (Windows, Linux, OSX)
+```powershell
+PM> Install-Package SciSharp.TensorFlow.Redist
+```
+
+* GPU version for Windows
+```powershell
+PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU
+```
+
+* GPU version for Linux
+```powershell
+PM> Install-Package SciSharp.TensorFlow.Redist-Linux-GPU
+```
+
+https://www.nuget.org/packages/SciSharp.TensorFlow.Redist
+
+Related merged [commits](https://github.com/SciSharp/TensorFlow.NET/commit/854a5ba61ad0e400623821236bd117cc24c6cb77).
+
+
+
+#### Download pre-built packages
+
+[Mac OSX CPU](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-darwin-x86_64-2.10.0.tar.gz), [Linux CPU](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-linux-x86_64-2.10.0.tar.gz), [Linux GPU](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-linux-x86_64-2.10.0.tar.gz), [Windows CPU](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-2.10.0.zip), [Windows GPU](https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-2.10.0.zip)
+
+
+
+#### Pack and Deploy ####
+
+On Windows, the tar command does not support extracting archives with symlinks. So when `dotnet pack` runs on Windows it will only package the Windows binaries.
+
+1. Run `dotnet pack SciSharp.TensorFlow.Redist.nupkgproj` under `src/SciSharp.TensorFlow.Redist` directory in Linux.
+2. Run `dotnet nuget push SciSharp.TensorFlow.Redist.2.10.0.nupkg -k APIKEY -s https://api.nuget.org/v3/index.json -t 600`
+
+
diff --git a/src/SciSharp.TensorFlow.Redist/Redist-CPU.nuspec b/src/SciSharp.TensorFlow.Redist/Redist-CPU.nuspec
new file mode 100644
index 000000000..1524a0f86
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/Redist-CPU.nuspec
@@ -0,0 +1,27 @@
+
+
+
+ $packageId$
+ $version$
+ The TensorFlow Authors
+ The TensorFlow Authors
+ true
+ LICENSE.txt
+ https://aka.ms/deprecateLicenseUrl
+ https://www.tensorflow.org/
+ https://avatars3.githubusercontent.com/u/44989469?s=200&v=4
+ $packageId$ contains the TensorFlow C library CPU version $version$ redistributed as a NuGet package.
+ https://github.com/tensorflow/tensorflow/releases/tag/v$version$
+ Copyright 2019 The TensorFlow Authors. All rights reserved.
+ TensorFlow
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/SciSharp.TensorFlow.Redist/Redist-Linux-GPU.nuspec b/src/SciSharp.TensorFlow.Redist/Redist-Linux-GPU.nuspec
new file mode 100644
index 000000000..3a08b37d6
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/Redist-Linux-GPU.nuspec
@@ -0,0 +1,27 @@
+
+
+
+ $packageId$
+ $version$
+ The TensorFlow Authors
+ The TensorFlow Authors
+ true
+ LICENSE.txt
+ https://aka.ms/deprecateLicenseUrl
+ https://www.tensorflow.org/
+ https://avatars3.githubusercontent.com/u/44989469?s=200&v=4
+ $packageId$ contains the TensorFlow C library GPU for Linux version $version$ redistributed as a NuGet package.
+ https://github.com/tensorflow/tensorflow/releases/tag/v$version$
+ Copyright 2019 The TensorFlow Authors. All rights reserved.
+ TensorFlow
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/SciSharp.TensorFlow.Redist/Redist-Windows-GPU.nuspec b/src/SciSharp.TensorFlow.Redist/Redist-Windows-GPU.nuspec
new file mode 100644
index 000000000..769838368
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/Redist-Windows-GPU.nuspec
@@ -0,0 +1,27 @@
+
+
+
+ $packageId$
+ $version$
+ The TensorFlow Authors
+ The TensorFlow Authors
+ true
+ LICENSE.txt
+ https://aka.ms/deprecateLicenseUrl
+ https://www.tensorflow.org/
+ https://avatars3.githubusercontent.com/u/44989469?s=200&v=4
+ $packageId$ contains the TensorFlow C library GPU for Windows version $version$ redistributed as a NuGet package.
+ https://github.com/tensorflow/tensorflow/releases/tag/v$version$
+ Copyright 2019 The TensorFlow Authors. All rights reserved.
+ TensorFlow
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-Linux-GPU.nupkgproj b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-Linux-GPU.nupkgproj
new file mode 100644
index 000000000..63e11ed4e
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-Linux-GPU.nupkgproj
@@ -0,0 +1,174 @@
+
+
+
+ $(MSBuildThisFileDirectory)
+ $(ProjDir)bin\
+ $(ProjDir)obj\
+
+ x64
+ netstandard2.0
+ 1.14.0
+ 1
+
+ $(BinDir)packages\
+ $(MSBuildProjectName)
+ $(TensorFlowVersion)
+
+ true
+ false
+
+ Redist-Linux-GPU.nuspec
+ packageId=$(PackageId);version=$(PackageVersion)
+ $(ProjDir)
+
+ CopyFilesFromArchive
+
+ win
+ linux
+ osx
+ $(PackageRid)-$(TargetArchitecture)
+
+
+
+
+ false
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ <_downloadFiles Include="@(TensorFlowArchive);@(AdditionalDownloadFile)" Url="%(Identity)" DestinationFile="%(DownloadFile)" />
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ @(FilesWithHashes->'%(FileHash)')
+ $([System.IO.File]::ReadAllText('%(LocalShaFile)').Replace("%0A", "").Replace("%0D", ""))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ <_fileFromArchive Include="%(TensorFlowArchive.FilesFromArchive)" ExtractDirectory="%(TensorFlowArchive.ExtractDirectory)" Runtime="%(TensorFlowArchive.Runtime)" />
+ <_fileFromArchive DestinationFile="%(FileName)%(Extension)"/>
+ <_fileFromArchive PackagePath="runtimes\%(_fileFromArchive.Runtime)\native\%(_fileFromArchive.DestinationFile)" />
+
+
+ <_fileFromArchive Condition="'%(DestinationFile)' == 'LICENSE'" PackagePath="THIRD_PARTY_NOTICES.txt" Runtime="" />
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-Windows-GPU.nupkgproj b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-Windows-GPU.nupkgproj
new file mode 100644
index 000000000..e2b101fac
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist-Windows-GPU.nupkgproj
@@ -0,0 +1,172 @@
+
+
+
+ $(MSBuildThisFileDirectory)
+ $(ProjDir)bin\
+ $(ProjDir)obj\
+
+ x64
+ netstandard2.0
+ 1.14.0
+ 1
+
+ $(BinDir)packages\
+ $(MSBuildProjectName)
+ $(TensorFlowVersion)
+
+ true
+ false
+
+ Redist-Windows-GPU.nuspec
+ packageId=$(PackageId);version=$(PackageVersion)
+ $(ProjDir)
+
+ CopyFilesFromArchive
+
+ win
+ linux
+ osx
+ $(PackageRid)-$(TargetArchitecture)
+
+
+
+
+ false
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ <_downloadFiles Include="@(TensorFlowArchive);@(AdditionalDownloadFile)" Url="%(Identity)" DestinationFile="%(DownloadFile)" />
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ @(FilesWithHashes->'%(FileHash)')
+ $([System.IO.File]::ReadAllText('%(LocalShaFile)').Replace("%0A", "").Replace("%0D", ""))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ <_fileFromArchive Include="%(TensorFlowArchive.FilesFromArchive)" ExtractDirectory="%(TensorFlowArchive.ExtractDirectory)" Runtime="%(TensorFlowArchive.Runtime)" />
+ <_fileFromArchive DestinationFile="%(FileName)%(Extension)"/>
+ <_fileFromArchive PackagePath="runtimes\%(_fileFromArchive.Runtime)\native\%(_fileFromArchive.DestinationFile)" />
+
+
+ <_fileFromArchive Condition="'%(DestinationFile)' == 'LICENSE'" PackagePath="THIRD_PARTY_NOTICES.txt" Runtime="" />
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist.nupkgproj b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist.nupkgproj
new file mode 100644
index 000000000..85ca28984
--- /dev/null
+++ b/src/SciSharp.TensorFlow.Redist/SciSharp.TensorFlow.Redist.nupkgproj
@@ -0,0 +1,187 @@
+
+
+
+ $(MSBuildThisFileDirectory)
+ $(ProjDir)bin\
+ $(ProjDir)obj\
+
+ x64
+ netstandard2.0
+ 1.15.0
+ 1
+
+ $(BinDir)packages\
+ $(MSBuildProjectName)
+ $(TensorFlowVersion)
+
+ true
+ false
+
+ Redist-CPU.nuspec
+ packageId=$(PackageId);version=$(PackageVersion)
+ $(ProjDir)
+
+ CopyFilesFromArchive
+
+ win
+ linux
+ osx
+ $(PackageRid)-$(TargetArchitecture)
+
+
+
+
+ false
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ <_downloadFiles Include="@(TensorFlowArchive);@(AdditionalDownloadFile)" Url="%(Identity)" DestinationFile="%(DownloadFile)" />
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ @(FilesWithHashes->'%(FileHash)')
+ $([System.IO.File]::ReadAllText('%(LocalShaFile)').Replace("%0A", "").Replace("%0D", ""))
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ <_fileFromArchive Include="%(TensorFlowArchive.FilesFromArchive)" ExtractDirectory="%(TensorFlowArchive.ExtractDirectory)" Runtime="%(TensorFlowArchive.Runtime)" />
+ <_fileFromArchive DestinationFile="%(FileName)%(Extension)"/>
+ <_fileFromArchive PackagePath="runtimes\%(_fileFromArchive.Runtime)\native\%(_fileFromArchive.DestinationFile)" />
+
+
+ <_fileFromArchive Condition="'%(DestinationFile)' == 'LICENSE'" PackagePath="THIRD_PARTY_NOTICES.txt" Runtime="" />
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/src/TensorFlowNET.Core/APIs/c_api.cs b/src/TensorFlowNET.Core/APIs/c_api.cs
new file mode 100644
index 000000000..a91b86827
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/c_api.cs
@@ -0,0 +1,107 @@
+/*****************************************************************************
+ Copyright 2020 Haiping Chen. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System;
+using System.Runtime.InteropServices;
+using static Tensorflow.CppShapeInferenceResult.Types;
+
+namespace Tensorflow
+{
+ ///
+ /// C API for TensorFlow.
+ /// Port from tensorflow\c\c_api.h
+ ///
+ /// The API leans towards simplicity and uniformity instead of convenience
+ /// since most usage will be by language specific wrappers.
+ ///
+ /// The params type mapping between c_api and .NET
+ /// TF_XX** => ref IntPtr (TF_Operation** op) => (ref IntPtr op)
+ /// TF_XX* => IntPtr (TF_Graph* graph) => (IntPtr graph)
+ /// struct => struct (TF_Output output) => (TF_Output output)
+ /// struct* => struct[] (TF_Output* output) => (TF_Output[] output)
+ /// struct* => struct* for ref
+ /// const char* => string
+ /// int32_t => int
+ /// int64_t* => long[]
+ /// size_t* => ulong[]
+ /// size_t* => ref ulong
+ /// void* => IntPtr
+ /// string => IntPtr c_api.StringPiece(IntPtr)
+ /// unsigned char => byte
+ ///
+ public partial class c_api
+ {
+ public const string TensorFlowLibName = "tensorflow";
+
+ public static string StringPiece(IntPtr handle)
+ {
+ return handle == IntPtr.Zero ? String.Empty : Marshal.PtrToStringAnsi(handle);
+ }
+
+ public unsafe static byte[] ByteStringPiece(Buffer? handle)
+ {
+ if (handle is null)
+ {
+ return new byte[0];
+ }
+ var data = handle.ToArray();
+ return data;
+ }
+
+ public unsafe static byte[] ByteStringPieceFromNativeString(IntPtr handle)
+ {
+ if (handle == IntPtr.Zero)
+ {
+ return new byte[0];
+ }
+
+ byte* str_data = (byte*)handle.ToPointer();
+ List bytes = new List();
+ byte current = 255;
+ while (current != ((byte)'\0'))
+ {
+ current = *(str_data++);
+ bytes.Add(current);
+ }
+ var data = bytes.ToArray();
+ return data;
+ }
+
+ [UnmanagedFunctionPointer(CallingConvention.Winapi)]
+ public delegate void Deallocator(IntPtr data, IntPtr size, ref DeallocatorArgs args);
+
+ [UnmanagedFunctionPointer(CallingConvention.Winapi)]
+ public delegate void DeallocatorV2(IntPtr data, long size, IntPtr args);
+
+ public struct DeallocatorArgs
+ {
+ internal static unsafe c_api.DeallocatorArgs* EmptyPtr;
+ internal static unsafe IntPtr Empty;
+
+ static unsafe DeallocatorArgs()
+ {
+ Empty = new IntPtr(EmptyPtr = (DeallocatorArgs*)Marshal.AllocHGlobal(Marshal.SizeOf()));
+ *EmptyPtr = new DeallocatorArgs() { gc_handle = IntPtr.Zero, deallocator_called = false };
+ }
+
+ public bool deallocator_called;
+ public IntPtr gc_handle;
+ }
+
+ [DllImport(TensorFlowLibName)]
+ internal static extern IntPtr TF_Version();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/c_api.customize.cs b/src/TensorFlowNET.Core/APIs/c_api.customize.cs
new file mode 100644
index 000000000..bee4897ee
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/c_api.customize.cs
@@ -0,0 +1,17 @@
+using System;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+using System.Text;
+
+namespace Tensorflow
+{
+ public partial class c_api
+ {
+ [DllImport(TensorFlowLibName)]
+ public static extern void TF_SetAttr(SafeGraphHandle graph, IntPtr op, string attr_name, SafeBufferHandle attr_value_proto, SafeStatusHandle status);
+ [DllImport(TensorFlowLibName)]
+ public static extern SafeBufferHandle TF_GetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output);
+ [DllImport(TensorFlowLibName)]
+ public static extern void TF_SetHandleShapeAndType(SafeGraphHandle c_graph, TF_Output output, byte[] data, long proto_len, SafeStatusHandle status);
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/c_api_lite.cs b/src/TensorFlowNET.Core/APIs/c_api_lite.cs
new file mode 100644
index 000000000..5a437d261
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/c_api_lite.cs
@@ -0,0 +1,91 @@
+using System;
+using System.Collections.Generic;
+using System.Runtime.InteropServices;
+using System.Text;
+using Tensorflow.Lite;
+
+namespace Tensorflow
+{
+ public class c_api_lite
+ {
+ public const string TensorFlowLibName = "tensorflowlite_c";
+
+ public static string StringPiece(IntPtr handle)
+ {
+ return handle == IntPtr.Zero ? String.Empty : Marshal.PtrToStringAnsi(handle);
+ }
+
+ [DllImport(TensorFlowLibName)]
+ public static extern IntPtr TfLiteVersion();
+
+ [DllImport(TensorFlowLibName)]
+ public static extern SafeTfLiteModelHandle TfLiteModelCreateFromFile(string model_path);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TfLiteModelDelete(IntPtr model);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern SafeTfLiteInterpreterOptionsHandle TfLiteInterpreterOptionsCreate();
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TfLiteInterpreterOptionsDelete(IntPtr options);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TfLiteInterpreterOptionsSetNumThreads(SafeTfLiteInterpreterOptionsHandle options, int num_threads);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern SafeTfLiteInterpreterHandle TfLiteInterpreterCreate(SafeTfLiteModelHandle model, SafeTfLiteInterpreterOptionsHandle optional_options);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern void TfLiteInterpreterDelete(IntPtr interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteInterpreterAllocateTensors(SafeTfLiteInterpreterHandle interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteInterpreterGetInputTensorCount(SafeTfLiteInterpreterHandle interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteInterpreterGetOutputTensorCount(SafeTfLiteInterpreterHandle interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteInterpreterResizeInputTensor(SafeTfLiteInterpreterHandle interpreter,
+ int input_index, int[] input_dims, int input_dims_size);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteTensor TfLiteInterpreterGetInputTensor(SafeTfLiteInterpreterHandle interpreter, int input_index);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteDataType TfLiteTensorType(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteTensorNumDims(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteTensorDim(TfLiteTensor tensor, int dim_index);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern int TfLiteTensorByteSize(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern IntPtr TfLiteTensorData(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern IntPtr TfLiteTensorName(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteQuantizationParams TfLiteTensorQuantizationParams(TfLiteTensor tensor);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteTensorCopyFromBuffer(TfLiteTensor tensor, IntPtr input_data, int input_data_size);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteInterpreterInvoke(SafeTfLiteInterpreterHandle interpreter);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern IntPtr TfLiteInterpreterGetOutputTensor(SafeTfLiteInterpreterHandle interpreter, int output_index);
+
+ [DllImport(TensorFlowLibName)]
+ public static extern TfLiteStatus TfLiteTensorCopyToBuffer(TfLiteTensor output_tensor, IntPtr output_data, int output_data_size);
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.array.cs b/src/TensorFlowNET.Core/APIs/tf.array.cs
new file mode 100644
index 000000000..b529cd319
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.array.cs
@@ -0,0 +1,350 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.NumPy;
+using System.Collections.Generic;
+using System.Diagnostics;
+using System.Linq;
+using static Tensorflow.Binding;
+using Tensorflow.Operations;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ ///
+ /// A convenient alias for None, useful for indexing arrays.
+ ///
+ public Slice newaxis = Slice.NewAxis;
+ ///
+ /// A convenient alias for ...
+ ///
+ public Slice ellipsis = Slice.Ellipsis;
+
+ ///
+ /// BatchToSpace for N-D tensors of type T.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor batch_to_space_nd(T input, int[] block_shape, int[,] crops, string name = null)
+ => gen_array_ops.batch_to_space_nd(ops.convert_to_tensor(input), ops.convert_to_tensor(block_shape),
+ ops.convert_to_tensor(crops), name: name);
+
+ ///
+ /// Apply boolean mask to tensor.
+ ///
+ ///
+ ///
+ /// N-D tensor.
+ /// K-D boolean tensor, K <= N and K must be known statically.
+ ///
+ /// A 0-D int Tensor representing the axis in tensor to mask from.
+ /// (N-K+1)-dimensional tensor populated by entries in tensor corresponding to True values in mask.
+ public Tensor boolean_mask(T1 tensor, T2 mask, string name = "boolean_mask", int axis = 0)
+ => array_ops.boolean_mask(tensor, mask, name: name, axis: axis);
+
+ ///
+ /// Broadcast an array for a compatible shape.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor broadcast_to(Tensor input, Shape shape, string name = null)
+ => gen_array_ops.broadcast_to(input, shape, name: name);
+
+ public Tensor check_numerics(Tensor tensor, string message, string name = null)
+ => gen_array_ops.check_numerics(tensor, message, name: name);
+
+ ///
+ /// Concatenates tensors along one dimension.
+ ///
+ /// A list of `Tensor` objects or a single `Tensor`.
+ ///
+ ///
+ /// A `Tensor` resulting from concatenation of the input tensors.
+ public Tensor concat(IEnumerable values, int axis, string name = "concat")
+ {
+ if (values.Count() == 1)
+ {
+ return tf_with(ops.name_scope(name), scope =>
+ {
+ var tensor = ops.convert_to_tensor(axis, name: "concat_dim", dtype: dtypes.int32);
+ Debug.Assert(tensor.shape.ndim == 0);
+ return identity(values.First(), name: scope);
+ });
+ }
+ return array_ops.concat(values.ToArray(), axis, name: name);
+ }
+
+ ///
+ /// Inserts a dimension of 1 into a tensor's shape.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A `Tensor` with the same data as `input`, but its shape has an additional
+ /// dimension of size 1 added.
+ ///
+ public Tensor expand_dims(Tensor input, int axis = -1, string name = null)
+ => array_ops.expand_dims(input, axis, name);
+
+ ///
+ /// Creates a tensor filled with a scalar value.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor fill(Tensor dims, T value, string name = null)
+ => gen_array_ops.fill(dims, ops.convert_to_tensor(value), name: name);
+
+ public Tensor fill(Shape dims, T value, string name = null)
+ => array_ops.fill(dims, value, name: name);
+
+ ///
+ /// Return a tensor with the same shape and contents as input.
+ ///
+ ///
+ ///
+ ///
+ public Tensor identity(Tensor input, string name = null)
+ => array_ops.identity(input, name: name);
+
+ ///
+ /// Gather slices from params axis axis according to indices.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor gather(Tensor @params, Tensor indices, string name = null, int axis = 0)
+ => array_ops.gather(@params, indices, name: name, axis: ops.convert_to_tensor(axis));
+
+ ///
+ /// Gather slices from `params` into a Tensor with shape specified by `indices`.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor gather_nd(Tensor @params, Tensor indices, string name = null)
+ => gen_array_ops.gather_nd(@params, indices, name: name);
+
+ ///
+ /// Return the elements, either from `x` or `y`, depending on the `condition`.
+ ///
+ ///
+ public Tensor where(Tensor condition, Tx x, Ty y, string name = null)
+ => array_ops.where(condition, x, y, name);
+
+ ///
+ /// Transposes `a`. Permutes the dimensions according to `perm`.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor transpose(T1 a, Axis perm = null, string name = "transpose", bool conjugate = false)
+ => array_ops.transpose(a, perm, name, conjugate);
+
+ ///
+ /// Reverses specific dimensions of a tensor.
+ ///
+ ///
+ /// The indices of the dimensions to reverse. Must be in the range [-rank(tensor), rank(tensor)).
+ ///
+ ///
+ public Tensor reverse(Tensor tensor, Axis axis, string name = null)
+ {
+ if (axis.IsScalar)
+ {
+ axis = new Axis(axis.axis);
+ }
+ return array_ops.reverse(tensor, axis, name: name);
+ }
+
+ ///
+ /// Returns the rank of a tensor.
+ ///
+ ///
+ ///
+ /// Returns a 0-D `int32` `Tensor` representing the rank of `input`.
+ public Tensor rank(Tensor input, string name = null)
+ => array_ops.rank(input, name: name);
+
+ ///
+ /// Extracts a slice from a tensor.
+ ///
+ /// A `Tensor`.
+ /// An `int32` or `int64` `Tensor`.
+ /// An `int32` or `int64` `Tensor`.
+ /// A name for the operation (optional).
+ /// A `Tensor` the same type as `input`.
+ public Tensor slice(Tensor input, Tb[] begin, Ts[] size, string name = null)
+ => array_ops.slice(input, begin.Select(x => ops.convert_to_tensor(x)).ToArray(),
+ size.Select(x => ops.convert_to_tensor(x)).ToArray(), name: name);
+
+ public Tensor squeeze(Tensor input, int axis, string name = null, int squeeze_dims = -1)
+ => array_ops.squeeze(input, new[] { axis }, name);
+
+ public Tensor squeeze(Tensor input, int[] axis = null, string name = null, int squeeze_dims = -1)
+ => array_ops.squeeze(input, axis, name);
+
+ ///
+ /// Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor stack(object values, int axis = 0, string name = "stack")
+ => array_ops.stack(values, axis, name: name);
+
+ ///
+ /// Creates a tensor with all elements set to 1.
+ ///
+ ///
+ ///
+ /// A name for the operation (optional).
+ ///
+ /// if true, attempt to statically determine the shape of 'tensor' and
+ /// encode it as a constant.
+ ///
+ /// A `Tensor` with all elements set to 1.
+ public Tensor ones_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true)
+ => array_ops.ones_like(tensor, dtype: dtype, name: name, optimize: optimize);
+
+ public Tensor ones_like(NDArray nd, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true)
+ => array_ops.ones_like(nd, dtype: dtype, name: name, optimize: optimize);
+
+ public Tensor one_hot(Tensor indices, int depth,
+ Tensor on_value = null,
+ Tensor off_value = null,
+ TF_DataType dtype = TF_DataType.DtInvalid,
+ int axis = -1,
+ string name = null) => array_ops.one_hot(indices, ops.convert_to_tensor(depth), dtype: dtype, axis: axis, name: name);
+
+ ///
+ /// Pads a tensor
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor pad(Tensor tensor, Tensor paddings, string mode = "CONSTANT", string name = null, int constant_values = 0)
+ => array_ops.pad(tensor, paddings, mode: mode, name: name, constant_values: constant_values);
+
+ ///
+ /// A placeholder op that passes through `input` when its output is not fed.
+ ///
+ ///
+ /// A `Tensor`. The default value to produce when output is not fed.
+ ///
+ /// A `tf.Shape` or list of `int`s. The (possibly partial) shape of
+ /// the tensor.
+ ///
+ /// A name for the operation (optional).
+ /// A `Tensor`. Has the same type as `input`.
+ public Tensor placeholder_with_default(T input, int[] shape, string name = null)
+ => gen_array_ops.placeholder_with_default(ops.convert_to_tensor(input), shape, name: name);
+
+ ///
+ /// Returns the shape of a tensor.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor shape(Tensor input, string name = null, TF_DataType out_type = TF_DataType.TF_INT32)
+ => array_ops.shape_internal(input, name, optimize: true, out_type: out_type);
+
+ ///
+ /// Stacks a list of rank-`R` tensors into one rank-`(R+1)` tensor.
+ ///
+ ///
+ ///
+ ///
+ /// A stacked `Tensor` with the same type as `values`.
+ public Tensor stack(Tensor[] values, int axis = 0, string name = "stack")
+ => array_ops.stack(values, axis: axis, name: name);
+
+ ///
+ /// Unpacks the given dimension of a rank-`R` tensor into rank-`(R-1)` tensors.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor[] unstack(Tensor value, int? num = null, int axis = 0, string name = "unstack")
+ => array_ops.unstack(value, num: num, axis: axis, name: name);
+
+ ///
+ /// Creates a tensor with all elements set to zero.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// A `Tensor` with all elements set to zero.
+ public Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true)
+ => array_ops.zeros_like(tensor, dtype: dtype, name: name, optimize: optimize);
+
+ public Tensor zeros_like(NDArray nd, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true)
+ => array_ops.zeros_like(nd, dtype: dtype, name: name, optimize: optimize);
+
+ ///
+ /// Stops gradient computation.
+ ///
+ ///
+ ///
+ ///
+ public Tensor stop_gradient(Tensor x, string name = null)
+ => gen_array_ops.stop_gradient(x, name: name);
+
+ public TensorArray TensorArray(TF_DataType dtype, int size = 0, bool dynamic_size = false,
+ bool clear_after_read = true, Shape? element_shape = null, bool colocate_with_first_write_call = true,
+ bool infer_shape = true)
+ => tf.executing_eagerly() ?
+ new _EagerTensorArray(dtype, size: constant_op.constant(size), dynamic_size: dynamic_size,
+ clear_after_read: clear_after_read, element_shape: element_shape, infer_shape: infer_shape,
+ colocate_with_first_write_call: colocate_with_first_write_call) :
+ new _GraphTensorArray(dtype, size: constant_op.constant(size), dynamic_size: dynamic_size,
+ clear_after_read: clear_after_read, element_shape: element_shape, infer_shape: infer_shape,
+ colocate_with_first_write_call: colocate_with_first_write_call);
+
+ public TensorArray TensorArray(TF_DataType dtype, Tensor size, bool dynamic_size = false,
+ bool clear_after_read = true, Shape? element_shape = null, bool colocate_with_first_write_call = true,
+ bool infer_shape = true)
+ => tf.executing_eagerly() ?
+ new _EagerTensorArray(dtype, size: size, dynamic_size: dynamic_size,
+ clear_after_read: clear_after_read, element_shape: element_shape, infer_shape: infer_shape,
+ colocate_with_first_write_call: colocate_with_first_write_call) :
+ new _GraphTensorArray(dtype, size: size, dynamic_size: dynamic_size,
+ clear_after_read: clear_after_read, element_shape: element_shape, infer_shape: infer_shape,
+ colocate_with_first_write_call: colocate_with_first_write_call);
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.audio.cs b/src/TensorFlowNET.Core/APIs/tf.audio.cs
new file mode 100644
index 000000000..573b11ec3
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.audio.cs
@@ -0,0 +1,37 @@
+/*****************************************************************************
+ Copyright 2021 Haiping Chen. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System.Collections.Generic;
+using Tensorflow.IO;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public AudioAPI audio { get; } = new AudioAPI();
+
+ public class AudioAPI
+ {
+ audio_ops audio_ops = new audio_ops();
+
+ public Tensors decode_wav(Tensor contents, int desired_channels = -1, int desired_samples = -1, string name = null)
+ => audio_ops.decode_wav(contents,
+ desired_channels: desired_channels,
+ desired_samples: desired_samples,
+ name: name);
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.autograph.cs b/src/TensorFlowNET.Core/APIs/tf.autograph.cs
new file mode 100644
index 000000000..55acac621
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.autograph.cs
@@ -0,0 +1,25 @@
+/*****************************************************************************
+ Copyright 2020 Haiping Chen. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.Graphs;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public AutoGraph autograph = new AutoGraph();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.bitwise.cs b/src/TensorFlowNET.Core/APIs/tf.bitwise.cs
new file mode 100644
index 000000000..b05182447
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.bitwise.cs
@@ -0,0 +1,25 @@
+/*****************************************************************************
+ Copyright 2020 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.Operations;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public bitwise_ops bitwise = new bitwise_ops();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.compat.cs b/src/TensorFlowNET.Core/APIs/tf.compat.cs
new file mode 100644
index 000000000..8a30badd9
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.compat.cs
@@ -0,0 +1,71 @@
+/*****************************************************************************
+ Copyright 2020 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Google.Protobuf;
+using System.Text;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public CompatApi compat { get; } = new CompatApi();
+
+ public class CompatApi
+ {
+ public CompatV1Api v1 { get; } = new CompatV1Api();
+
+ internal string as_text(string bytes_or_text, Encoding? encoding = null)
+ {
+ if(encoding is null) encoding = Encoding.UTF8;
+ return bytes_or_text;
+ }
+ internal string as_text(byte[] bytes_or_text, Encoding? encoding = null)
+ {
+ if(encoding is null) encoding = Encoding.UTF8;
+ return encoding.GetString(bytes_or_text);
+ }
+
+ internal string as_str(string bytes_or_text, Encoding? encoding = null)
+ {
+ return as_text(bytes_or_text, encoding);
+ }
+ internal string as_str(byte[] bytes_or_text, Encoding? encoding = null)
+ {
+ return as_text(bytes_or_text, encoding);
+ }
+
+ public ByteString as_bytes(ByteString bytes, Encoding encoding = null)
+ {
+ return bytes;
+ }
+ public ByteString as_bytes(byte[] bytes, Encoding encoding = null)
+ {
+ return ByteString.CopyFrom(bytes);
+ }
+ public ByteString as_bytes(string text, Encoding encoding = null)
+ {
+ if(encoding is null)
+ {
+ encoding = Encoding.UTF8;
+ }
+ return ByteString.CopyFrom(encoding.GetBytes(text));
+ }
+ }
+
+ public bool executing_eagerly()
+ => Context.executing_eagerly();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.compat.v1.cs b/src/TensorFlowNET.Core/APIs/tf.compat.v1.cs
new file mode 100644
index 000000000..982e7ccce
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.compat.v1.cs
@@ -0,0 +1,60 @@
+/*****************************************************************************
+ Copyright 2020 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System.Collections.Generic;
+using static Tensorflow.Binding;
+
+namespace Tensorflow
+{
+ public class CompatV1Api
+ {
+ public void disable_eager_execution()
+ => tf.Context.graph_mode();
+
+ public IVariableV1 get_variable(string name,
+ Shape shape = null,
+ TF_DataType dtype = TF_DataType.DtInvalid,
+ object initializer = null, // IInitializer or Tensor
+ bool? trainable = null,
+ List collections = null,
+ bool? use_resource = null,
+ bool validate_shape = true,
+ VariableSynchronization synchronization = VariableSynchronization.Auto,
+ VariableAggregation aggregation = VariableAggregation.None)
+ {
+ var scope = Tensorflow.variable_scope.get_variable_scope();
+ var store = Tensorflow.variable_scope._get_default_variable_store();
+ return scope.get_variable(store,
+ name,
+ shape: shape,
+ dtype: dtype,
+ use_resource: use_resource,
+ validate_shape: validate_shape,
+ initializer: initializer,
+ trainable: trainable,
+ collections: collections);
+ }
+
+ public Operation global_variables_initializer()
+ {
+ var g = variables.global_variables();
+ return variables.variables_initializer(g.ToArray());
+ }
+
+ public Session Session()
+ => new Session().as_default();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.config.cs b/src/TensorFlowNET.Core/APIs/tf.config.cs
new file mode 100644
index 000000000..3c30ffb48
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.config.cs
@@ -0,0 +1,32 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.Contexts;
+using Tensorflow.Framework;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ /// <summary>
+ /// Public API for the tf.config namespace.
+ /// https://www.tensorflow.org/api_docs/python/tf/config
+ /// Exposes runtime/device configuration options.
+ /// (Previous comment referenced tf.debugging by mistake.)
+ /// </summary>
+ public ConfigImpl config => new ConfigImpl();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.control_flow.cs b/src/TensorFlowNET.Core/APIs/tf.control_flow.cs
new file mode 100644
index 000000000..cd5a71e50
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.control_flow.cs
@@ -0,0 +1,73 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public Tensor cond(Tensor pred,
+ Tensor true_value,
+ Tensor false_value)
+ => control_flow_ops.cond(pred, () => true_value, () => false_value);
+
+ public Tensor cond(Tensor pred,
+ Func<Tensor> true_fn = null,
+ Func<Tensor> false_fn = null,
+ string name = null)
+ => control_flow_ops.cond(pred, true_fn, false_fn, name: name);
+
+ /// <summary>
+ /// Create an op that groups multiple operations.
+ /// </summary>
+ /// <typeparam name="T">Tensor or Operation element type.</typeparam>
+ /// <param name="inputs">The operations/tensors to group.</param>
+ /// <param name="name">Optional name for the op.</param>
+ /// <returns>An Operation that executes all its inputs.</returns>
+ public Operation group<T>(T[] inputs, string name = null) where T : ITensorOrOperation
+ => control_flow_ops.group(inputs, name: name);
+
+ public Tensor while_loop(Func<Tensor, Tensor> cond,
+ Func<Tensor, Tensor> body,
+ Tensor loop_vars,
+ int parallel_iterations = 10)
+ {
+ Func<Tensor[], Tensor> cond1 = x
+ => cond(x[0]);
+
+ Func<Tensor[], Tensor[]> body1 = x
+ => new[] { body(x[0]) };
+
+ var results = control_flow_ops.while_loop(cond1,
+ body1,
+ new[] { loop_vars });
+ return results[0];
+ }
+
+ public Tensor[] while_loop(Func<Tensors, Tensor> cond,
+ Func<Tensors, Tensors> body,
+ Tensors loop_vars,
+ int parallel_iterations = 10,
+ string name = null)
+ => control_flow_ops.while_loop(cond, body, loop_vars,
+ parallel_iterations: parallel_iterations,
+ name: name);
+
+ public _ControlDependenciesController control_dependencies(ITensorOrOperation[] control_inputs)
+ => ops.control_dependencies(control_inputs);
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.data.cs b/src/TensorFlowNET.Core/APIs/tf.data.cs
new file mode 100644
index 000000000..6c41a8393
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.data.cs
@@ -0,0 +1,31 @@
+/*****************************************************************************
+ Copyright 2020 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public DataOps data { get; } = new DataOps();
+
+ public class DataOps
+ {
+ public int AUTOTUNE = -1;
+ public int INFINITE_CARDINALITY = -1;
+ public int UNKNOWN_CARDINALITY = -2;
+ public DatasetManager Dataset { get; } = new DatasetManager();
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.data_flow.cs b/src/TensorFlowNET.Core/APIs/tf.data_flow.cs
new file mode 100644
index 000000000..e4c0a83cc
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.data_flow.cs
@@ -0,0 +1,43 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ /// <summary>
+ /// Interleave the values from the data tensors into a single tensor.
+ /// </summary>
+ /// <param name="indices">Index tensors selecting output positions.</param>
+ /// <param name="data">Data tensors to interleave.</param>
+ /// <param name="name">Optional name for the op.</param>
+ /// <returns>The merged tensor.</returns>
+ public Tensor dynamic_stitch(Tensor[] indices, Tensor[] data, string name = null)
+ => gen_data_flow_ops.dynamic_stitch(indices, data, name: name);
+
+ /// <summary>
+ /// Partitions `data` into `num_partitions` tensors using indices from `partitions`.
+ /// </summary>
+ /// <param name="data">Tensor to partition.</param>
+ /// <param name="partitions">Index tensor assigning each element to a partition.</param>
+ /// <param name="num_partitions">The number of partitions to output.</param>
+ /// <param name="name">Optional name for the op.</param>
+ /// <returns>An array of `num_partitions` tensors.</returns>
+ public Tensor[] dynamic_partition(Tensor data, Tensor partitions, int num_partitions,
+ string name = null)
+ => gen_data_flow_ops.dynamic_partition(data, partitions, num_partitions, name: name);
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.debugging.cs b/src/TensorFlowNET.Core/APIs/tf.debugging.cs
new file mode 100644
index 000000000..b3b3529e4
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.debugging.cs
@@ -0,0 +1,35 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.Debugging;
+using static Tensorflow.Binding;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ /// <summary>
+ /// Public API for tf.debugging namespace
+ /// https://www.tensorflow.org/api_docs/python/tf/debugging
+ /// More debugging instructions
+ /// https://developer.ibm.com/technologies/artificial-intelligence/tutorials/debug-tensorflow/
+ /// </summary>
+ public DebugImpl debugging => new DebugImpl();
+
+ public void print(Tensor input)
+ => tf.logging.print_v2(input);
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.distributions.cs b/src/TensorFlowNET.Core/APIs/tf.distributions.cs
new file mode 100644
index 000000000..c9ccad917
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.distributions.cs
@@ -0,0 +1,32 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public distributions_internal distributions { get; } = new distributions_internal();
+
+ public class distributions_internal
+ {
+ public Normal Normal(Tensor loc,
+ Tensor scale,
+ bool validate_args = false,
+ bool allow_nan_stats = true,
+ string name = "Normal") => new Normal(loc, scale, validate_args, allow_nan_stats, name);
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.gradients.cs b/src/TensorFlowNET.Core/APIs/tf.gradients.cs
new file mode 100644
index 000000000..d722cb143
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.gradients.cs
@@ -0,0 +1,98 @@
+/*****************************************************************************
+ Copyright 2020 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System.Collections.Generic;
+using Tensorflow.Gradients;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ GradientTape _tapeSet;
+
+ ///
+ /// Record operations for automatic differentiation.
+ ///
+ ///
+ ///
+ /// Tape set
+ public GradientTape GradientTape(bool persistent = false,
+ bool watch_accessed_variables = true)
+ {
+ var tape = _tapeSet.PushTape(persistent: persistent,
+ watch_accessed_variables: watch_accessed_variables);
+ tape.StartRecord();
+ return _tapeSet;
+ }
+
+ public Stack<ITape> GetTapeSet()
+ => _tapeSet.GetTapeSet();
+
+ public Tensor[] gradients(Tensor[] ys,
+ Tensor[] xs,
+ Tensor[] grad_ys = null,
+ string name = "gradients",
+ bool colocate_gradients_with_ops = false,
+ bool gate_gradients = false,
+ int? aggregation_method = null,
+ Tensor[] stop_gradients = null)
+ {
+ return gradients_util._GradientsHelper(ys,
+ xs,
+ grad_ys,
+ name,
+ colocate_gradients_with_ops,
+ gate_gradients,
+ stop_gradients: stop_gradients);
+ }
+
+ public Tensor[] gradients(Tensor ys,
+ Tensor[] xs,
+ Tensor[] grad_ys = null,
+ string name = "gradients",
+ bool colocate_gradients_with_ops = false,
+ bool gate_gradients = false,
+ int? aggregation_method = null,
+ Tensor[] stop_gradients = null)
+ {
+ return gradients_util._GradientsHelper(new Tensor[] { ys },
+ xs,
+ grad_ys,
+ name,
+ colocate_gradients_with_ops,
+ gate_gradients,
+ stop_gradients: stop_gradients);
+ }
+
+ public Tensor[] gradients(Tensor ys,
+ Tensor xs,
+ Tensor[] grad_ys = null,
+ string name = "gradients",
+ bool colocate_gradients_with_ops = false,
+ bool gate_gradients = false,
+ int? aggregation_method = null,
+ Tensor[] stop_gradients = null)
+ {
+ return gradients_util._GradientsHelper(new Tensor[] { ys },
+ new Tensor[] { xs },
+ grad_ys,
+ name,
+ colocate_gradients_with_ops,
+ gate_gradients,
+ stop_gradients: stop_gradients);
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.graph.cs b/src/TensorFlowNET.Core/APIs/tf.graph.cs
new file mode 100644
index 000000000..c1b033aee
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.graph.cs
@@ -0,0 +1,43 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using static Tensorflow.ops;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public graph_util_impl graph_util { get; } = new graph_util_impl();
+ public GraphTransformer graph_transforms { get; } = new GraphTransformer();
+ public GraphKeys GraphKeys { get; } = new GraphKeys();
+
+ public void reset_default_graph()
+ => ops.reset_default_graph();
+
+ public Graph get_default_graph()
+ => ops.get_default_graph();
+
+ public Graph peak_default_graph()
+ => ops.peak_default_graph();
+
+ /// <summary>
+ /// Creates a new graph.
+ /// </summary>
+ /// <remarks>Has no interaction with graph defaulting. Equivalent to new Graph();</remarks>
+ public Graph Graph()
+ => new Graph();
+ }
+}
\ No newline at end of file
diff --git a/src/TensorFlowNET.Core/APIs/tf.image.cs b/src/TensorFlowNET.Core/APIs/tf.image.cs
new file mode 100644
index 000000000..41ef52967
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.image.cs
@@ -0,0 +1,376 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using OneOf.Types;
+using System;
+using System.Buffers.Text;
+using Tensorflow.Contexts;
+using static Tensorflow.Binding;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public image_internal image = new image_internal();
+
+ public class image_internal
+ {
+ public Tensor random_flip_up_down(Tensor image, int seed = 0)
+ => image_ops_impl.random_flip_up_down(image, seed);
+
+ public Tensor random_flip_left_right(Tensor image, int seed = 0)
+ => image_ops_impl.random_flip_left_right(image, seed);
+
+ public Tensor flip_left_right(Tensor image)
+ => image_ops_impl.flip_left_right(image);
+
+ public Tensor flip_up_down(Tensor image)
+ => image_ops_impl.flip_up_down(image);
+
+ public Tensor rot90(Tensor image, int k = 1, string name = null)
+ => image_ops_impl.rot90(image, k, name);
+
+ public Tensor transpose(Tensor image, string name = null)
+ => image_ops_impl.transpose(image, name);
+
+ public Tensor central_crop(Tensor image, float central_fraction)
+ => image_ops_impl.central_crop(image, central_fraction);
+
+ public Tensor pad_to_bounding_box(Tensor image, int offset_height, int offset_width, int target_height, int target_width)
+ => image_ops_impl.pad_to_bounding_box(image, offset_height, offset_width, target_height, target_width);
+
+ public Tensor crop_to_bounding_box(Tensor image, int offset_height, int offset_width, int target_height, int target_width)
+ => image_ops_impl.crop_to_bounding_box(image, offset_height, offset_width, target_height, target_width);
+
+ public Tensor resize_image_with_crop_or_pad(Tensor image, object target_height, object target_width)
+ => image_ops_impl.resize_image_with_crop_or_pad(image, target_height, target_width);
+
+ public Tensor resize_images(Tensor images, Tensor size, string method = ResizeMethod.BILINEAR, bool preserve_aspect_ratio = false, bool antialias = false,
+ string name = null)
+ => image_ops_impl.resize_images(images, size, method, preserve_aspect_ratio, antialias, name);
+
+ public Tensor resize_images_v2(Tensor images, Shape size, string method = ResizeMethod.BILINEAR, bool preserve_aspect_ratio = false, bool antialias = false,
+ string name = null)
+ => image_ops_impl.resize_images_v2(images, size, method, preserve_aspect_ratio, antialias, name);
+
+ public Tensor resize_images_v2(Tensor images, Tensor size, string method = ResizeMethod.BILINEAR, bool preserve_aspect_ratio = false, bool antialias = false,
+ string name = null)
+ => image_ops_impl.resize_images_v2(images, size, method, preserve_aspect_ratio, antialias, name);
+
+ public Tensor resize_images_with_pad(Tensor image, int target_height, int target_width, string method, bool antialias)
+ => image_ops_impl.resize_images_with_pad(image, target_height, target_width, method, antialias);
+
+ public Tensor per_image_standardization(Tensor image)
+ => image_ops_impl.per_image_standardization(image);
+
+ public Tensor random_brightness(Tensor image, float max_delta, int seed = 0)
+ => image_ops_impl.random_brightness(image, max_delta, seed);
+
+ public Tensor random_contrast(Tensor image, float lower, float upper, int seed = 0)
+ => image_ops_impl.random_contrast(image, lower, upper, seed);
+
+ public Tensor adjust_brightness(Tensor image, Tensor delta)
+ => image_ops_impl.adjust_brightness(image, delta);
+
+ public Tensor adjust_contrast(Tensor images, Tensor contrast_factor)
+ => image_ops_impl.adjust_contrast(images, contrast_factor);
+
+ public Tensor adjust_gamma(Tensor image, int gamma = 1, int gain = 1)
+ => image_ops_impl.adjust_gamma(image, gamma, gain);
+
+ public Tensor rgb_to_grayscale(Tensor images, string name = null)
+ => image_ops_impl.rgb_to_grayscale(images, name);
+
+ public Tensor grayscale_to_rgb(Tensor images, string name = null)
+ => image_ops_impl.grayscale_to_rgb(images, name);
+
+ public Tensor random_hue(Tensor image, float max_delta, int seed = 0)
+ => image_ops_impl.random_hue(image, max_delta, seed);
+
+ public Tensor adjust_hue(Tensor image, Tensor delta, string name = null)
+ => image_ops_impl.adjust_hue(image, delta, name);
+
+ public Tensor random_jpeg_quality(Tensor image, float min_jpeg_quality, float max_jpeg_quality, int seed = 0)
+ => image_ops_impl.random_jpeg_quality(image, min_jpeg_quality, max_jpeg_quality, seed);
+
+ public Tensor adjust_jpeg_quality(Tensor image, Tensor jpeg_quality, string name = null)
+ => image_ops_impl.adjust_jpeg_quality(image, jpeg_quality, name);
+
+ public Tensor random_saturation(Tensor image, float lower, float upper, int seed = 0)
+ => image_ops_impl.random_saturation(image, lower, upper, seed);
+
+ public Tensor adjust_saturation(Tensor image, Tensor saturation_factor, string name = null)
+ => image_ops_impl.adjust_saturation(image, saturation_factor, name);
+
+ public Tensor total_variation(Tensor images, string name = null)
+ => image_ops_impl.total_variation(images, name);
+
+ public (Tensor, Tensor, Tensor) sample_distorted_bounding_box(Tensor image_size, Tensor bounding_boxes,
+ int seed = 0,
+ Tensor min_object_covered = null,
+ float[] aspect_ratio_range = null,
+ float[] area_range = null,
+ int max_attempts = 100,
+ bool use_image_if_no_bounding_boxes = false,
+ string name = null)
+ => image_ops_impl.sample_distorted_bounding_box_v2(image_size, bounding_boxes, seed, min_object_covered, aspect_ratio_range,
+ area_range, max_attempts, use_image_if_no_bounding_boxes, name);
+
+ public Tensor non_max_suppression(Tensor boxes, Tensor scores, Tensor max_output_size, float iou_threshold = 0.5f,
+ float score_threshold = -1f / 0f, /*float soft_nms_sigma = 0.0f,*/ string name = null)
+ => image_ops_impl.non_max_suppression(boxes, scores, max_output_size, iou_threshold, score_threshold, name);
+
+ public Tensor non_max_suppression_with_overlaps(Tensor overlaps, Tensor scores, Tensor max_output_size,
+ float overlap_threshold = 0.5f, float score_threshold = -1 / 0f, string name = null)
+ => image_ops_impl.non_max_suppression_with_overlaps(overlaps, scores, max_output_size, overlap_threshold, score_threshold, name);
+
+ public Tensor rgb_to_yiq(Tensor images)
+ => image_ops_impl.rgb_to_yiq(images);
+
+ public Tensor yiq_to_rgb(Tensor images)
+ => image_ops_impl.yiq_to_rgb(images);
+
+ public Tensor rgb_to_yuv(Tensor images)
+ => image_ops_impl.rgb_to_yuv(images);
+
+ public Tensor yuv_to_rgb(Tensor images)
+ => image_ops_impl.yuv_to_rgb(images);
+
+ public Tensor psnr(Tensor a, Tensor b, Tensor max_val, string name = null)
+ => image_ops_impl.psnr(a, b, max_val, name);
+
+ public Tensor ssim(Tensor img1, Tensor img2, float max_val = 1f, float filter_size = 11f, float filter_sigma = 1.5f,
+ float k1 = 0.01f, float k2 = 0.03f)
+ => image_ops_impl.ssim(img1, img2, max_val, filter_size, filter_sigma, k1, k2);
+
+ public Tensor ssim_multiscale(Tensor img1, Tensor img2, float max_val, float[] power_factors = null, float filter_size = 11f,
+ float filter_sigma = 1.5f, float k1 = 0.01f, float k2 = 0.03f)
+ => image_ops_impl.ssim_multiscale(img1, img2, max_val, power_factors, filter_size, filter_sigma, k1, k2);
+
+ public (Tensor, Tensor) image_gradients(Tensor image)
+ => image_ops_impl.image_gradients(image);
+
+ public Tensor sobel_edges(Tensor image)
+ => image_ops_impl.sobel_edges(image);
+
+ ///
+ /// Adjust contrast of RGB or grayscale images.
+ ///
+ /// Images to adjust. At least 3-D.
+ ///
+ /// A float multiplier for adjusting contrast.
+ /// The contrast-adjusted image or images.
+ public Tensor adjust_contrast(Tensor images, float contrast_factor, string name = null)
+ => gen_image_ops.adjust_contrastv2(images, contrast_factor, name);
+
+ ///
+ /// Adjust hue of RGB images.
+ ///
+ /// RGB image or images. The size of the last dimension must be 3.
+ /// float. How much to add to the hue channel.
+ /// A name for this operation (optional).
+ /// Adjusted image(s), same shape and DType as `image`.
+ /// if `delta` is not in the interval of `[-1, 1]`.
+ public Tensor adjust_hue(Tensor images, float delta, string name = null)
+ {
+ if (tf.Context.executing_eagerly())
+ {
+ if (delta < -1f || delta > 1f)
+ throw new ValueError("delta must be in the interval [-1, 1]");
+ }
+ return gen_image_ops.adjust_hue(images, delta, name: name);
+ }
+
+ ///
+ /// Adjust saturation of RGB images.
+ ///
+ /// RGB image or images. The size of the last dimension must be 3.
+ /// float. Factor to multiply the saturation by.
+ /// A name for this operation (optional).
+ /// Adjusted image(s), same shape and DType as `image`.
+ public Tensor adjust_saturation(Tensor image, float saturation_factor, string name = null)
+ => gen_image_ops.adjust_saturation(image, saturation_factor, name);
+
+ ///
+ /// Greedily selects a subset of bounding boxes in descending order of score.
+ ///
+ ///
+ /// A 4-D float `Tensor` of shape `[batch_size, num_boxes, q, 4]`. If `q`
+ /// is 1 then same boxes are used for all classes otherwise, if `q` is equal
+ /// to number of classes, class-specific boxes are used.
+ ///
+ ///
+ /// A 3-D float `Tensor` of shape `[batch_size, num_boxes, num_classes]`
+ /// representing a single score corresponding to each box(each row of boxes).
+ ///
+ ///
+ /// A scalar integer `Tensor` representing the
+ /// maximum number of boxes to be selected by non-max suppression per class
+ ///
+ ///
+ /// A int32 scalar representing maximum number of boxes retained
+ /// over all classes.Note that setting this value to a large number may
+ /// result in OOM error depending on the system workload.
+ ///
+ ///
+ /// A float representing the threshold for deciding whether boxes
+ /// overlap too much with respect to IOU.
+ ///
+ ///
+ /// A float representing the threshold for deciding when to
+ /// remove boxes based on score.
+ ///
+ ///
+ /// If false, the output nmsed boxes, scores and classes are
+ /// padded/clipped to `max_total_size`. If true, the output nmsed boxes, scores and classes are padded to be of length `max_size_per_class`*`num_classes`,
+ /// unless it exceeds `max_total_size` in which case it is clipped to `max_total_size`. Defaults to false.
+ ///
+ ///
+ /// If true, the coordinates of output nmsed boxes will be clipped
+ /// to[0, 1]. If false, output the box coordinates as it is. Defaults to true.
+ ///
+ ///
+ /// 'nmsed_boxes': A [batch_size, max_detections, 4] float32 tensor containing the non-max suppressed boxes.
+ /// 'nmsed_scores': A [batch_size, max_detections] float32 tensor containing the scores for the boxes.
+ /// 'nmsed_classes': A [batch_size, max_detections] float32 tensor containing the class for boxes.
+ /// 'valid_detections': A [batch_size] int32 tensor indicating the number of
+ /// valid detections per batch item. Only the top valid_detections[i] entries
+ /// in nms_boxes[i], nms_scores[i] and nms_class[i] are valid. The rest of the
+ /// entries are zero paddings.
+ ///
+ public (Tensor, Tensor, Tensor, Tensor) combined_non_max_suppression(
+ Tensor boxes,
+ Tensor scores,
+ int max_output_size_per_class,
+ int max_total_size,
+ float iou_threshold,
+ float score_threshold,
+ bool pad_per_class = false,
+ bool clip_boxes = true)
+ {
+ var iou_threshold_t = ops.convert_to_tensor(iou_threshold, TF_DataType.TF_FLOAT, name: "iou_threshold");
+ var score_threshold_t = ops.convert_to_tensor(score_threshold, TF_DataType.TF_FLOAT, name: "score_threshold");
+ var max_total_size_t = ops.convert_to_tensor(max_total_size);
+ var max_output_size_per_class_t = ops.convert_to_tensor(max_output_size_per_class);
+ return gen_image_ops.combined_non_max_suppression(boxes, scores, max_output_size_per_class_t, max_total_size_t,
+ iou_threshold_t, score_threshold_t, pad_per_class, clip_boxes);
+ }
+
+ ///
+ /// Extracts crops from the input image tensor and resizes them using bilinear sampling or nearest neighbor sampling (possibly with aspect ratio change) to a common output size specified by crop_size. This is more general than the crop_to_bounding_box op which extracts a fixed size slice from the input image and does not allow resizing or aspect ratio change.
+ /// Returns a tensor with crops from the input image at positions defined at the bounding box locations in boxes.The cropped boxes are all resized(with bilinear or nearest neighbor interpolation) to a fixed size = [crop_height, crop_width].The result is a 4 - D tensor[num_boxes, crop_height, crop_width, depth].The resizing is corner aligned. In particular, if boxes = [[0, 0, 1, 1]], the method will give identical results to using tf.image.resize_bilinear() or tf.image.resize_nearest_neighbor() (depends on the method argument) with align_corners = True.
+ ///
+ /// A Tensor. Must be one of the following types: uint8, uint16, int8, int16, int32, int64, half, float32, float64. A 4-D tensor of shape [batch, image_height, image_width, depth]. Both image_height and image_width need to be positive.
+ /// A Tensor of type float32. A 2-D tensor of shape [num_boxes, 4]. The i-th row of the tensor specifies the coordinates of a box in the box_ind[i] image and is specified in normalized coordinates [y1, x1, y2, x2]. A normalized coordinate value of y is mapped to the image coordinate at y * (image_height - 1), so as the [0, 1] interval of normalized image height is mapped to [0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which case the sampled crop is an up-down flipped version of the original image. The width dimension is treated similarly. Normalized coordinates outside the [0, 1] range are allowed, in which case we use extrapolation_value to extrapolate the input image values.
+ /// A Tensor of type int32. A 1-D tensor of shape [num_boxes] with int32 values in [0, batch). The value of box_ind[i] specifies the image that the i-th box refers to.
+ /// A Tensor of type int32. A 1-D tensor of 2 elements, size = [crop_height, crop_width]. All cropped image patches are resized to this size. The aspect ratio of the image content is not preserved. Both crop_height and crop_width need to be positive.
+ /// An optional string from: "bilinear", "nearest". Defaults to "bilinear". A string specifying the sampling method for resizing. It can be either "bilinear" or "nearest" and default to "bilinear". Currently two sampling methods are supported: Bilinear and Nearest Neighbor.
+ /// An optional float. Defaults to 0. Value used for extrapolation, when applicable.
+ /// A name for the operation (optional).
+ /// A 4-D tensor of shape [num_boxes, crop_height, crop_width, depth].
+ public Tensor crop_and_resize(Tensor image, Tensor boxes, Tensor box_ind, Tensor crop_size, string method = "bilinear", float extrapolation_value = 0f, string name = null) =>
+ gen_image_ops.crop_and_resize(image, boxes, box_ind, crop_size, method, extrapolation_value, name);
+
+ public Tensor decode_jpeg(Tensor contents,
+ int channels = 0,
+ int ratio = 1,
+ bool fancy_upscaling = true,
+ bool try_recover_truncated = false,
+ int acceptable_fraction = 1,
+ string dct_method = "",
+ string name = null)
+ => gen_image_ops.decode_jpeg(contents, channels: channels, ratio: ratio,
+ fancy_upscaling: fancy_upscaling, try_recover_truncated: try_recover_truncated,
+ acceptable_fraction: acceptable_fraction, dct_method: dct_method);
+
+ public Tensor extract_glimpse(Tensor input, Tensor size, Tensor offsets, bool centered = true, bool normalized = true,
+ bool uniform_noise = true, string name = null)
+ => image_ops_impl.extract_glimpse(input, size, offsets, centered, normalized, uniform_noise, name);
+
+ public (Tensor, Tensor, Tensor, Tensor) combined_non_max_suppression(Tensor boxes, Tensor scores, Tensor max_output_size_per_class,
+ Tensor max_total_size, float iou_threshold = 0.5f, float score_threshold = -1f / 0f, bool pad_per_class = false, bool clip_boxes = true,
+ string name = null)
+ => image_ops_impl.combined_non_max_suppression(boxes, scores, max_output_size_per_class, max_total_size, iou_threshold, score_threshold,
+ pad_per_class, clip_boxes, name);
+
+ public (Tensor, Tensor) non_max_suppression_padded(Tensor boxes, Tensor scores, Tensor max_output_size,
+ float iou_threshold = 0.5f,
+ float score_threshold = -1f / 0f,
+ bool pad_to_max_output_size = false,
+ string name = null,
+ bool sorted_input = false,
+ bool canonicalized_coordinates = false,
+ int tile_size = 512)
+ => image_ops_impl.non_max_suppression_padded(boxes, scores, max_output_size, iou_threshold, score_threshold, pad_to_max_output_size,
+ name, sorted_input, canonicalized_coordinates, tile_size);
+
+ public Tensor resize(Tensor image, Shape size, string method = ResizeMethod.BILINEAR)
+ => image_ops_impl.resize_images_v2(image, size, method: method);
+
+ public Tensor resize(Tensor image, Tensor size, string method = ResizeMethod.BILINEAR)
+ => image_ops_impl.resize_images_v2(image, size, method: method);
+
+ public Tensor resize_bilinear(Tensor images, Tensor size, bool align_corners = false, bool half_pixel_centers = false, string name = null)
+ => gen_image_ops.resize_bilinear(images, size, align_corners: align_corners, half_pixel_centers: half_pixel_centers, name: name);
+
+ public Tensor resize_images(Tensor images, Tensor size, string method = ResizeMethod.BILINEAR,
+ bool preserve_aspect_ratio = false, string name = null)
+ => image_ops_impl.resize_images(images, size, method: method,
+ preserve_aspect_ratio: preserve_aspect_ratio, name: name);
+
+ public Tensor convert_image_dtype(Tensor image, TF_DataType dtype, bool saturate = false, string name = null)
+ => gen_image_ops.convert_image_dtype(image, dtype, saturate: saturate, name: name);
+
+ public Tensor decode_image(Tensor contents, int channels = 0, TF_DataType dtype = TF_DataType.TF_UINT8,
+ string name = null, bool expand_animations = true)
+ => image_ops_impl.decode_image(contents, channels: channels, dtype: dtype,
+ name: name, expand_animations: expand_animations);
+
+ public Tensor encode_png(Tensor contents, string name = null)
+ => image_ops_impl.encode_png(contents, name: name);
+
+ public Tensor encode_jpeg(Tensor contents, string name = null)
+ => image_ops_impl.encode_jpeg(contents, name: name);
+
+
+ ///
+ /// Convenience function to check if the 'contents' encodes a JPEG image.
+ ///
+ ///
+ ///
+ ///
+ public Tensor is_jpeg(Tensor contents, string name = null)
+ => image_ops_impl.is_jpeg(contents, name: name);
+
+ ///
+ /// Resize `images` to `size` using nearest neighbor interpolation.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor resize_nearest_neighbor<Tsize>(Tensor images, Tsize size, bool align_corners = false,
+ string name = null, bool half_pixel_centers = false)
+ => image_ops_impl.resize_nearest_neighbor(images, size, align_corners: align_corners,
+ name: name, half_pixel_centers: half_pixel_centers);
+
+ public Tensor draw_bounding_boxes(Tensor images, Tensor boxes, Tensor colors = null, string name = null)
+ => image_ops_impl.draw_bounding_boxes(images, boxes, colors, name);
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.init.cs b/src/TensorFlowNET.Core/APIs/tf.init.cs
new file mode 100644
index 000000000..8635f6620
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.init.cs
@@ -0,0 +1,104 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.Operations.Initializers;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public InitializersImpl initializers { get; } = new InitializersImpl();
+
+ public IInitializer constant_initializer(T value, TF_DataType dtype = TF_DataType.TF_FLOAT, bool verify_shape = false)
+ => new Constant(value, dtype: dtype, verify_shape: verify_shape);
+ public IInitializer zeros_initializer => new Zeros();
+ public IInitializer ones_initializer => new Ones();
+ public IInitializer glorot_uniform_initializer => new GlorotUniform();
+ public IInitializer random_uniform_initializer => new RandomUniform();
+ public IInitializer orthogonal_initializer => new Orthogonal();
+
+ public variable_scope variable_scope(string name,
+ string default_name = null,
+ Tensor[] values = null,
+ bool? reuse = null,
+ bool auxiliary_name_scope = true) => new variable_scope(name,
+ default_name,
+ values,
+ reuse: reuse,
+ auxiliary_name_scope: auxiliary_name_scope);
+
+ public variable_scope variable_scope(VariableScope scope,
+ string default_name = null,
+ Tensor[] values = null,
+ bool? reuse = null,
+ bool auxiliary_name_scope = true) => new variable_scope(scope,
+ default_name,
+ values,
+ reuse: reuse,
+ auxiliary_name_scope: auxiliary_name_scope);
+
+ public IInitializer truncated_normal_initializer(float mean = 0.0f,
+ float stddev = 1.0f,
+ int? seed = null,
+ TF_DataType dtype = TF_DataType.DtInvalid) => new TruncatedNormal(mean: mean,
+ stddev: stddev,
+ seed: seed,
+ dtype: dtype);
+
+ public IInitializer random_normal_initializer(float mean = 0.0f,
+ float stddev = 1.0f,
+ int? seed = null,
+ TF_DataType dtype = TF_DataType.DtInvalid) => new RandomNormal(mean: mean,
+ stddev: stddev,
+ seed: seed,
+ dtype: dtype);
+
+ ///
+ /// Initializer capable of adapting its scale to the shape of weights tensors.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public IInitializer variance_scaling_initializer(float factor = 1.0f,
+ string mode = "fan_in",
+ string distribution = "truncated_normal",
+ int? seed = null,
+ TF_DataType dtype = TF_DataType.TF_FLOAT) => new VarianceScaling(
+ scale: factor,
+ mode: mode,
+ distribution: distribution,
+ seed: seed,
+ dtype: dtype);
+
+ public class InitializersImpl
+ {
+ public IInitializer random_normal_initializer(float mean = 0.0f,
+ float stddev = 0.05f,
+ int? seed = null,
+ TF_DataType dtype = TF_DataType.TF_FLOAT) => new RandomNormal(mean: mean,
+ stddev: stddev,
+ seed: seed,
+ dtype: dtype);
+
+ public IInitializer zeros_initializer(Shape shape = null,
+ TF_DataType dtype = TF_DataType.TF_FLOAT) => new Zeros(shape: shape,
+ dtype: dtype);
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.io.cs b/src/TensorFlowNET.Core/APIs/tf.io.cs
new file mode 100644
index 000000000..ea1e44b28
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.io.cs
@@ -0,0 +1,66 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System.Collections.Generic;
+using Tensorflow.IO;
+using Tensorflow.Operations;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public IoApi io { get; } = new IoApi();
+
+ public class IoApi
+ {
+ io_ops ops;
+ public GFile gfile;
+ public IoApi()
+ {
+ ops = new io_ops();
+ gfile = new GFile();
+ }
+
+ public Tensor read_file(string filename, string name = null)
+ => ops.read_file(filename, name);
+
+ public Tensor read_file(Tensor filename, string name = null)
+ => ops.read_file(filename, name);
+
+ public Operation save_v2(Tensor prefix, string[] tensor_names,
+ string[] shape_and_slices, Tensor[] tensors, string name = null)
+ => ops.save_v2(prefix, tensor_names, shape_and_slices, tensors, name: name);
+
+ public Tensor[] restore_v2(Tensor prefix, string[] tensor_names,
+ string[] shape_and_slices, TF_DataType[] dtypes, string name = null)
+ => ops.restore_v2(prefix, tensor_names, shape_and_slices, dtypes, name: name);
+
+ public Operation write_file(string filename, Tensor conentes, string name = null)
+ => write_file(Tensorflow.ops.convert_to_tensor(filename, TF_DataType.TF_STRING), conentes, name);
+
+ public Operation write_file(Tensor filename, Tensor conentes, string name = null)
+ => gen_ops.write_file(filename, conentes, name);
+ }
+
+ public GFile gfile = new GFile();
+
+ public ITensorOrOperation[] import_graph_def(GraphDef graph_def,
+ Dictionary input_map = null,
+ string[] return_elements = null,
+ string name = null,
+ OpList producer_op_list = null) => importer.import_graph_def(graph_def, input_map, return_elements, name: name, producer_op_list: producer_op_list);
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.linalg.cs b/src/TensorFlowNET.Core/APIs/tf.linalg.cs
new file mode 100644
index 000000000..32f64ec35
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.linalg.cs
@@ -0,0 +1,111 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+using Tensorflow.NumPy;
+using static Tensorflow.Binding;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public LinalgApi linalg { get; } = new LinalgApi();
+
+ public class LinalgApi
+ {
+ linalg_ops ops = new linalg_ops();
+
+ public Tensor einsum(string equation, Tensors inputs, string name = null)
+ => math_ops.einsum(equation, inputs, name: name);
+
+ public Tensor eye(int num_rows,
+ int num_columns = -1,
+ Shape batch_shape = null,
+ TF_DataType dtype = TF_DataType.TF_DOUBLE,
+ string name = null)
+ => ops.eye(num_rows, num_columns: num_columns, batch_shape: batch_shape, dtype: dtype, name: name);
+
+ public Tensor diag(Tensor diagonal, string name = null)
+ => gen_array_ops.diag(diagonal, name: name);
+
+ public Tensor matmul(Tensor a, Tensor b)
+ => math_ops.matmul(a, b);
+
+ public Tensor norm(Tensor a, string ord = "euclidean", Axis axis = null, string name = null)
+ => ops.norm(a, ord: ord, axis: axis, name: name);
+
+ public Tensor batch_matmul(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string name = null)
+ => math_ops.batch_matmul(x, y, adj_x: adj_x, adj_y: adj_y, name: name);
+
+ public Tensor inv(Tensor input, bool adjoint = false, string name = null)
+ => ops.matrix_inverse(input, adjoint: adjoint, name: name);
+
+ public Tensor global_norm(Tensor[] t_list, string name = null)
+ => clip_ops.global_norm(t_list, name: name);
+
+ public Tensor l2_normalize(Tensor x,
+ int axis = 0,
+ float epsilon = 1e-12f,
+ string name = null)
+ => nn_impl.l2_normalize(x, axis: axis, epsilon: constant_op.constant(epsilon), name: name);
+
+ public Tensor lstsq(Tensor matrix, Tensor rhs,
+ NDArray l2_regularizer = null, bool fast = true, string name = null)
+ => ops.matrix_solve_ls(matrix, rhs, l2_regularizer: l2_regularizer, fast: fast, name: name);
+
+ public Tensors qr(Tensor input, bool full_matrices = true, string name = null)
+ => ops.qr(input, full_matrices: full_matrices, name: name);
+
+ public Tensor tensor_diag_part(Tensor input, string name = null)
+ => gen_array_ops.diag_part(input, name: name);
+
+ public Tensor tensordot(Tensor x, Tensor y, NDArray axes, string name = null)
+ => math_ops.tensordot(x, y, axes, name: name);
+ }
+
+ public Tensor diag(Tensor diagonal, string name = null)
+ => gen_array_ops.diag(diagonal, name: name);
+
+ public Tensor matmul(Tensor a, Tensor b, bool transpose_a = false, bool transpose_b = false)
+ => math_ops.matmul(a, b, transpose_a: transpose_a, transpose_b: transpose_b);
+
+ ///
+ /// Multiply slices of the two matrices "x" and "y".
+ ///
+ ///
+ /// The `BatchMatMul` operation is embedded into the
+ /// `MatMul` operation on the DLL side. However the expected
+ /// attributes are not the same, hence we need to expose this
+ /// method to have the right args list on the `_apply_op_helper`
+ /// function.
+ ///
+ /// For each rank > 2 the first rank - 2 dimensions are considered
+ /// as fixed, and have to be consistent across the two matrices. A
+ /// common matrix multiplication is then applied over the residual
+ /// 2 dimensions.
+ ///
+ /// e.g.
+ /// x is (3, 6, 12); y is (3, 12, 6)
+ /// batch_matmul(x, y) ==> (3, 6, 6)
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor batch_matmul(Tensor x, Tensor y, bool adj_x = false, bool adj_y = false, string name = null)
+ => math_ops.batch_matmul(x, y, adj_x: adj_x, adj_y: adj_y, name: name);
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.logging.cs b/src/TensorFlowNET.Core/APIs/tf.logging.cs
new file mode 100644
index 000000000..0e10c1610
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.logging.cs
@@ -0,0 +1,23 @@
+/*****************************************************************************
+ Copyright 2021 Haiping Chen. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public logging_ops logging => new logging_ops();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.loss.cs b/src/TensorFlowNET.Core/APIs/tf.loss.cs
new file mode 100644
index 000000000..48ed01500
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.loss.cs
@@ -0,0 +1,23 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public LossesImpl losses => new LossesImpl();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.math.cs b/src/TensorFlowNET.Core/APIs/tf.math.cs
new file mode 100644
index 000000000..da54a9dd7
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.math.cs
@@ -0,0 +1,628 @@
+/*****************************************************************************
+ Copyright 2023 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.NumPy;
+using Tensorflow.Operations;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public MathApi math { get; } = new MathApi();
        /// <summary>Math ops grouped under `tf.math.*`.</summary>
        public class MathApi
        {
            /// <summary>Returns the index with the largest value across <paramref name="axis"/>.</summary>
            // NOTE(review): `dimension` is accepted but never forwarded — only
            // `axis` reaches arg_max; confirm whether it is a deprecated alias.
            public Tensor argmax(Tensor input, Axis axis = null, string name = null, int? dimension = null, TF_DataType output_type = TF_DataType.TF_INT64)
                => gen_math_ops.arg_max(input, axis, name: name, output_type: output_type);

            /// <summary>Counts non-zero elements, optionally along <paramref name="axis"/>.</summary>
            // NOTE(review): `name` is not forwarded to count_nonzero_v2.
            public Tensor count_nonzero(Tensor input, Axis? axis = null, bool? keepdims = null, TF_DataType dtype = TF_DataType.TF_INT64, string name = null)
                => math_ops.count_nonzero_v2(input, axis: axis, keepdims: keepdims ?? false, dtype: dtype);

            /// <summary>Computes the natural logarithm of x element-wise.</summary>
            public Tensor log(Tensor x, string name = null)
                => gen_math_ops.log(x, name);

            /// <summary>
            /// Computes the Gauss error function of `x` element-wise.
            /// </summary>
            /// <param name="x">Input tensor.</param>
            /// <param name="name">A name for the operation (optional).</param>
            /// <returns>A tensor with the same type as <paramref name="x"/> — presumably; result comes from math_ops.erf.</returns>
            public Tensor erf(Tensor x, string name = null)
                => math_ops.erf(x, name);

            /// <summary>Returns x * y element-wise.</summary>
            public Tensor multiply(Tensor x, Tensor y, string name = null)
                => math_ops.multiply(x, y, name: name);

            /// <summary>Computes a / b, returning 0 where b == 0 — presumably; delegates to div_no_nan.</summary>
            // NOTE(review): `name` is not forwarded to div_no_nan.
            public Tensor divide_no_nan(Tensor a, Tensor b, string name = null)
                => math_ops.div_no_nan(a, b);

            /// <summary>
            /// Computes the Euclidean norm of elements across dimensions of a tensor.
            /// </summary>
            /// <param name="input_tensor">The tensor to reduce. Should have numeric type.</param>
            /// <param name="axis">The dimensions to reduce. If null (the default), reduces all dimensions. Must be in the range [-rank(input_tensor), rank(input_tensor)).</param>
            /// <param name="keepdims">If true, retains reduced dimensions with length 1.</param>
            /// <param name="name">A name for the operation (optional).</param>
            /// <returns>The reduced tensor, of the same dtype as the input_tensor.</returns>
            public Tensor reduce_euclidean_norm(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
                => math_ops.reduce_euclidean_norm(input_tensor, axis: axis, keepdims: keepdims, name);

            /// <summary>Computes the square of x element-wise.</summary>
            public Tensor square(Tensor x, string name = null)
                => math_ops.square(x, name: name);

            /// <summary>Sums elements of x, optionally along <paramref name="axis"/>.</summary>
            public Tensor sum(Tensor x, Axis? axis = null, string name = null)
                => math_ops.reduce_sum(x, axis: axis, name: name);

            /// <summary>Computes softplus: log(exp(features) + 1) — presumably; delegates to nn_ops.softplus.</summary>
            public Tensor softplus(Tensor features, string name = null)
                => nn_ops.softplus(features, name: name);

            /// <summary>Computes the hyperbolic tangent of x element-wise.</summary>
            public Tensor tanh(Tensor x, string name = null)
                => math_ops.tanh(x, name: name);

            /// <summary>
            /// Finds values and indices of the `k` largest entries for the last dimension.
            /// </summary>
            /// <param name="input">Tensor with at least one dimension — presumably; forwarded verbatim.</param>
            /// <param name="k">Number of top entries to return.</param>
            /// <param name="sorted">If true, the returned values are sorted descending.</param>
            /// <param name="name">A name for the operation (optional).</param>
            /// <returns>Values and indices tensors — presumably; result comes from top_kv2.</returns>
            public Tensors top_k(Tensor input, int k, bool sorted = true, string name = null)
                => nn_ops.top_kv2(input, k, sorted: sorted, name: name);

            /// <summary>Says whether the targets are in the top `k` predictions.</summary>
            public Tensor in_top_k(Tensor predictions, Tensor targets, int k, string name = "InTopK")
                => nn_ops.in_top_k(predictions, targets, k, name);

            /// <summary>
            /// Counts occurrences of each value in an integer tensor.
            /// </summary>
            /// <param name="arr">Integer tensor whose values are counted — presumably; forwarded verbatim.</param>
            /// <param name="weights">Optional per-element weights added instead of 1.</param>
            /// <param name="minlength">Minimum length of the output.</param>
            /// <param name="maxlength">Values >= maxlength are ignored — TODO confirm against math_ops.bincount.</param>
            /// <param name="dtype">Output element type.</param>
            /// <param name="name">A name for the operation (optional).</param>
            /// <param name="axis">Axis along which to count.</param>
            /// <param name="binary_output">If true, outputs 1 for presence instead of counts.</param>
            public Tensor bincount(Tensor arr, Tensor weights = null,
                Tensor minlength = null,
                Tensor maxlength = null,
                TF_DataType dtype = TF_DataType.TF_INT32,
                string name = null,
                Shape axis = null,
                bool binary_output = false)
                => math_ops.bincount(arr, weights: weights, minlength: minlength, maxlength: maxlength,
                    dtype: dtype, name: name, axis: axis, binary_output: binary_output);

            /// <summary>Returns the real part of a (possibly complex) tensor.</summary>
            public Tensor real(Tensor x, string name = null)
                => gen_ops.real(x, x.dtype.real_dtype(), name);

            /// <summary>Returns the imaginary part of a (possibly complex) tensor.</summary>
            public Tensor imag(Tensor x, string name = null)
                => gen_ops.imag(x, x.dtype.real_dtype(), name);

            /// <summary>Returns the complex conjugate of x element-wise.</summary>
            public Tensor conj(Tensor x, string name = null)
                => gen_ops.conj(x, name);

            /// <summary>Returns the argument (angle) of a complex tensor element-wise.</summary>
            public Tensor angle(Tensor x, string name = null)
                => gen_ops.angle(x, x.dtype.real_dtype(), name);
        }
+
+ public Tensor abs(Tensor x, string name = null)
+ => math_ops.abs(x, name);
+
+ ///
+ /// Computes acos of x element-wise.
+ ///
+ ///
+ ///
+ ///
+ public Tensor acos(Tensor x, string name = null)
+ => gen_math_ops.acos(x, name);
+
+ ///
+ /// Computes asin of x element-wise.
+ ///
+ ///
+ ///
+ ///
+ public Tensor asin(Tensor x, string name = null)
+ => gen_math_ops.asin(x, name);
+
+ public Tensor add(Tensor a, Tensor b, string name = null)
+ => gen_math_ops.add(a, b, name: name);
+
+ public Tensor add(Tx a, Ty b, string name = null)
+ => gen_math_ops.add(ops.convert_to_tensor(a), ops.convert_to_tensor(b), name: name);
+
+ ///
+ /// Adds all input tensors element-wise.
+ ///
+ ///
+ ///
+ /// A `Tensor` of same shape and type as the elements of `inputs`.
+ public Tensor add_n(Tensor[] inputs, string name = null)
+ => math_ops.add_n(inputs, name: name);
+
+ ///
+ /// Computes atan of x element-wise.
+ ///
+ ///
+ ///
+ ///
+ public Tensor atan(Tensor x, string name = null)
+ => gen_math_ops.atan(x, name);
+
+ public Tensor arg_max(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null)
+ => gen_math_ops.arg_max(input, ops.convert_to_tensor(dimension), output_type: output_type, name: name);
+
+ public Tensor arg_min(Tensor input, int dimension, TF_DataType output_type = TF_DataType.TF_INT64, string name = null)
+ => gen_math_ops.arg_min(input, ops.convert_to_tensor(dimension), output_type: output_type, name: name);
+
+ public Tensor is_finite(Tensor input, string name = null)
+ => gen_math_ops.is_finite(input, name);
+
+ public Tensor is_nan(Tensor input, string name = null)
+ => gen_math_ops.is_nan(input, name);
+
+ ///
+ /// Returns element-wise smallest integer not less than x.
+ ///
+ ///
+ ///
+ ///
+ public Tensor ceil(Tensor x, string name = null)
+ => gen_math_ops.ceil(x, name);
+
+ ///
+ /// Computes sin of x element-wise.
+ ///
+ ///
+ ///
+ ///
+ public Tensor sin(Tensor x, string name = null)
+ => gen_math_ops.sin(x, name);
+
+ ///
+ /// Computes hyperbolic sine of x element-wise.
+ ///
+ ///
+ ///
+ ///
+ public Tensor sinh(Tensor x, string name = null)
+ => gen_math_ops.sinh(x, name);
+
+ ///
+ /// Computes cos of x element-wise.
+ ///
+ ///
+ ///
+ ///
+ public Tensor cos(Tensor x, string name = null)
+ => gen_math_ops.cos(x, name);
+
+ public Tensor cos(float x, string name = null)
+ => gen_math_ops.cos(ops.convert_to_tensor(x), name);
+
+ ///
+ /// Computes hyperbolic cosine of x element-wise.
+ ///
+ ///
+ ///
+ ///
+ public Tensor cosh(Tensor x, string name = null)
+ => gen_math_ops.cosh(x, name);
+
+ public Tensor tan(Tensor x, string name = null)
+ => gen_math_ops.tan(x, name);
+
+ public Tensor tanh(Tensor x, string name = null)
+ => gen_math_ops.tanh(x, name);
+
+ ///
+ /// Returns element-wise largest integer not greater than x.
+ ///
+ ///
+ ///
+ ///
+ public Tensor floor(Tensor x, string name = null)
+ => gen_math_ops.floor(x, name);
+
+ ///
+ /// Returns the truth value of (x > y) element-wise.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor greater(Tx x, Ty y, string name = null)
+ => gen_math_ops.greater(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
+
+ ///
+ /// Returns the truth value of (x >= y) element-wise.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor greater_equal(Tx x, Ty y, string name = null)
+ => gen_math_ops.greater_equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
+
+ ///
+ /// Returns the truth value of (x < y) element-wise.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor less(Tx x, Ty y, string name = null)
+ => gen_math_ops.less(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
+
+ ///
+ /// Computes the log of the absolute value of `Gamma(x)` element-wise.
+ ///
+ /// A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
+ /// A name for the operation (optional).
+ /// A `Tensor`. Has the same type as `x`.
+ public Tensor lgamma(Tensor x, string name = null)
+ => gen_math_ops.lgamma(x, name: name);
+
+ ///
+ /// Returns the truth value of (x <= y) element-wise.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor less_equal(Tx x, Ty y, string name = null)
+ => gen_math_ops.less_equal(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
+
+ ///
+ /// Computes natural logarithm of (1 + x) element-wise.
+ ///
+ ///
+ ///
+ ///
+ public Tensor log1p(Tensor x, string name = null)
+ => gen_math_ops.log1p(x, name);
+
+ public Tensor logical_and(T x, T y, string name = null)
+ => gen_math_ops.logical_and(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name);
+
+ public Tensor logical_not(Tensor x, string name = null)
+ => gen_math_ops.logical_not(x, name);
+
+ public Tensor logical_or(Tensor x, Tensor y, string name = null)
+ => gen_math_ops.logical_or(x, y, name);
+
+ public Tensor logical_xor(Tensor x, Tensor y, string name = "LogicalXor")
+ {
+ return gen_math_ops.logical_and(gen_math_ops.logical_or(x, y),
+ gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)), name);
+ }
+
+ ///
+ /// Clips tensor values to a specified min and max.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor _clip_by_value(Tensor t, Tensor clip_value_min, Tensor clip_value_max, string name = null)
+ => gen_math_ops.clip_by_value(t, clip_value_min, clip_value_max);
+
+ ///
+ /// Clips tensor values to a specified min and max.
+ ///
+ ///
+ /// A Tensor.
+ ///
+ ///
+ /// A 0-D (scalar) Tensor, or a Tensor with the same shape
+ /// as t. The minimum value to clip by.
+ ///
+ ///
+ /// A 0-D (scalar) Tensor, or a Tensor with the same shape
+ /// as t. The maximum value to clip by.
+ ///
+ ///
+ /// If specified, the created operation in the graph will be this one, otherwise it will be named 'ClipByValue'.
+ ///
+ ///
+ /// A clipped Tensor with the same shape as input 't'.
+ /// The Operation can be fetched from the resulting Tensor, by fetching the Operation property from the result.
+ ///
+ ///
+ /// Given a tensor t, this operation returns a tensor of the same type and
+ /// shape as t with its values clipped to clip_value_min and clip_value_max.
+ /// Any values less than clip_value_min are set to clip_value_min. Any values
+ /// greater than clip_value_max are set to clip_value_max.
+ ///
+ public Tensor clip_by_value(Tensor t, T1 clip_value_min, T2 clip_value_max, string name = "ClipByValue")
+ => clip_ops.clip_by_value(t, clip_value_min, clip_value_max, name);
+
+ public Tensor sub(Tx a, Ty b, string name = null)
+ => gen_math_ops.sub(ops.convert_to_tensor(a), ops.convert_to_tensor(b), name: name);
+
+ public Tensor divide(Tensor a, Tensor b)
+ => a / b;
+
+ public Tensor sqrt(Tensor a, string name = null)
+ => math_ops.sqrt(a, name);
+
+ public Tensor sign(Tensor a, string name = null)
+ => gen_math_ops.sign(a, name);
+
+ public Tensor subtract(Tensor x, T[] y, string name = null) where T : struct
+ => gen_math_ops.sub(x, ops.convert_to_tensor(y, dtype: x.dtype.as_base_dtype(), name: "y"), name);
+
+ ///
+ /// return x - y
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor subtract(Tensor x, Tensor y, string name = null)
+ => gen_math_ops.sub(x, y, name);
+
+ public Tensor log(Tensor x, string name = null)
+ => gen_math_ops.log(x, name);
+
+ public Tensor equal(Tensor x, Tensor y, string name = null)
+ => gen_math_ops.equal(x, y, name: name);
+
+ ///
+ /// Computes arctangent of `y/x` element-wise, respecting signs of the arguments.
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor atan2(Tensor y, Tensor x, string name = null)
+ => gen_math_ops.atan2(y, x, name);
+
+ ///
+ /// Computes the maximum of elements across dimensions of a tensor.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor max(Tx input, Ty axis, bool keep_dims = false, string name = null)
+ => gen_math_ops.max(ops.convert_to_tensor(input), ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name);
+
+ ///
+ /// Computes the minimum of elements across dimensions of a tensor.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor min(Tx input, Ty axis, bool keep_dims = false, string name = null)
+ => gen_math_ops.min(ops.convert_to_tensor(input), ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name);
+
+ ///
+ /// Returns the max of x and y (i.e. x > y ? x : y) element-wise.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor maximum(T1 x, T2 y, string name = null)
+ => gen_math_ops.maximum(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name);
+
+ ///
+ /// Returns the min of x and y (i.e. x < y ? x : y) element-wise.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor minimum(T1 x, T2 y, string name = null)
+ => gen_math_ops.minimum(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name);
+
+ public Tensor multiply(Tensor x, Tensor y, string name = null)
+ => gen_math_ops.mul(x, y, name: name);
+
+ ///
+ /// return x * y
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor multiply(Tx x, Ty y, string name = null)
+ => gen_math_ops.mul(ops.convert_to_tensor(x), ops.convert_to_tensor(y), name: name);
+ ///
+ /// return scalar product
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor dot_prod(Tx x, Ty y, NDArray axes, string name = null)
+ => math_ops.tensordot(convert_to_tensor(x), convert_to_tensor(y), axes, name: name);
+ public Tensor negative(Tensor x, string name = null)
+ => gen_math_ops.neg(x, name);
+
+ ///
+ /// Returns the truth value of (x != y) element-wise.
+ ///
+ ///
+ ///
+ ///
+ /// A `Tensor` of type bool with the same size as that of x or y.
+ public Tensor not_equal(Tx x, Ty y, string name = null)
+ => math_ops.not_equal(x, y, name: name);
+
+ ///
+ /// Divides x / y elementwise (using Python 2 division operator semantics).
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor div(Tensor x, Tensor y, string name = null)
+ => math_ops.div(x, y, name: name);
+
+ public Tensor divide(Tensor x, T[] y, string name = null) where T : struct
+ => x / ops.convert_to_tensor(y, dtype: x.dtype.as_base_dtype(), name: "y");
+
+ public Tensor pow(T1 x, T2 y, string name = "pow")
+ => math_ops.pow(x, y, name: name);
+
+ ///
+ /// Divides `x / y` elementwise, rounding toward the most negative integer.
+ ///
+ ///
+ ///
+ ///
+ /// `x / y` rounded down.
+ public Tensor floordiv(Tensor x, Tensor y, string name = null)
+ => math_ops.floordiv(x, y, name: name);
+
+ ///
+ /// Divides x / y elementwise (using Python 3 division operator semantics).
+ ///
+ ///
+ ///
+ ///
+ /// `x / y` evaluated in floating point.
+ public static Tensor truediv(Tensor x, Tensor y, string name = null)
+ => math_ops.truediv(x, y, name: name);
+
+ public Tensor range(object start, object limit = null, object delta = null, TF_DataType? dtype = null, string name = "range")
+ => math_ops.range(start, limit: limit, delta: delta, dtype: dtype, name: name);
+
+ public Tensor real(Tensor input, string name = null)
+ => math_ops.real(input, name);
+
+ ///
+ /// Computes the "logical or" of elements across dimensions of a tensor.
+ ///
+ /// The boolean tensor to reduce.
+ /// The dimensions to reduce.
+ /// If true, retains reduced dimensions with length 1.
+ ///
+ /// The reduced tensor.
+ public Tensor reduce_any(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
+ => math_ops.reduce_any(input_tensor, axis: axis, keepdims: keepdims, name: name);
+
+ ///
+ /// Computes the "logical and" of elements across dimensions of a tensor.
+ ///
+ ///
+ ///
+ ///
+ ///
+ /// The reduced tensor.
+ public Tensor reduce_all(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
+ => math_ops.reduce_all(input_tensor, axis: axis, keepdims: keepdims, name: name);
+
+ ///
+ /// Computes the product of elements across dimensions of a tensor.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor reduce_prod(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
+ => math_ops.reduce_prod(input_tensor, axis: axis, keepdims: keepdims, name: name);
+
+ ///
+ /// Computes the sum of elements across dimensions of a tensor.
+ ///
+ ///
+ ///
+ ///
+ public Tensor reduce_sum(Tensor input, Axis? axis = null, Axis? reduction_indices = null,
+ bool keepdims = false, string name = null)
+ {
+ if (keepdims)
+ return math_ops.reduce_sum(input, axis: constant_op.constant(axis ?? reduction_indices), keepdims: keepdims, name: name);
+ else
+ return math_ops.reduce_sum(input, axis: constant_op.constant(axis ?? reduction_indices));
+ }
+
+ ///
+ /// Computes the maximum of elements across dimensions of a tensor.
+ ///
+ ///
+ ///
+ ///
+ ///
+ ///
+ public Tensor reduce_max(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
+ => math_ops.reduce_max(input_tensor, axis, keepdims, name);
+
+ public Tensor reduce_min(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
+ => math_ops.reduce_min(input_tensor, axis, keepdims, name);
+
+ public Tensor reduce_std(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
+ => math_ops.reduce_std(input_tensor, axis, keepdims, name);
+
+ public Tensor reduce_variance(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null)
+ => math_ops.reduce_variance(input_tensor, axis, keepdims, name);
+
+ public Tensor sigmoid(T x, string name = null)
+ => math_ops.sigmoid(x, name: name);
+
+ public Tensor sum(Tensor input, int axis, bool keep_dims = false, string name = null)
+ => gen_math_ops.sum(input, ops.convert_to_tensor(axis), keep_dims: keep_dims, name: name);
+
+ public Tensor reduce_mean(Tensor input_tensor, Axis? axis = null, bool keepdims = false, string name = null, int? reduction_indices = null)
+ => math_ops.reduce_mean(input_tensor, axis: axis, keepdims: keepdims, name: name, reduction_indices: reduction_indices);
+
+ public Tensor round(Tensor x, string name = null)
+ => gen_math_ops.round(x, name: name);
+
+ public Tensor cast(Tensor x, TF_DataType dtype, string name = null)
+ => math_ops.cast(x, dtype, name);
+
+ public Tensor cumsum(Tensor x, int axis = 0, bool exclusive = false, bool reverse = false, string name = null)
+ => math_ops.cumsum(x, axis: axis, exclusive: exclusive, reverse: reverse, name: name);
+
+ public Tensor square(Tensor x, string name = null)
+ => gen_math_ops.square(x, name: name);
+ public Tensor squared_difference(Tensor x, Tensor y, string name = null)
+ => gen_math_ops.squared_difference(x: x, y: y, name: name);
+ public Tensor complex(Tensor real, Tensor imag, Tensorflow.TF_DataType? dtype = null,
+ string name = null) => gen_ops.complex(real, imag, dtype, name);
+ public Tensor exp(Tensor x,
+ string name = null) => gen_math_ops.exp(x, name);
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.nn.cs b/src/TensorFlowNET.Core/APIs/tf.nn.cs
new file mode 100644
index 000000000..112c48628
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.nn.cs
@@ -0,0 +1,248 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System.Xml.Linq;
+using Tensorflow.Operations;
+using Tensorflow.Operations.Activation;
+using static Tensorflow.Binding;
+
+namespace Tensorflow
+{
+    public partial class tensorflow
+    {
+        public nn_internal nn { get; } = new nn_internal();
+
+        /// <summary>
+        /// Neural-network API surface (tf.nn.*). Thin wrappers over gen_nn_ops / nn_ops / nn_impl.
+        /// </summary>
+        public class nn_internal
+        {
+            /// <summary>Computes a 2-D convolution; delegates to gen_nn_ops.conv2d.</summary>
+            public Tensor conv2d(Tensor input, Tensor filter, int[] strides, string padding, bool use_cudnn_on_gpu = true,
+                string data_format = "NHWC", int[] dilations = null, string name = null)
+            {
+                return gen_nn_ops.conv2d(input, filter, strides, padding, use_cudnn_on_gpu,
+                    data_format: data_format, dilations: dilations, name: name);
+            }
+
+            /// <summary>Performs greedy CTC decoding; delegates to gen_ctc_ops.ctc_greedy_decoder.</summary>
+            public Tensor[] ctc_greedy_decoder(Tensor inputs, Tensor sequence_length, bool merge_repeated = true, string name = null)
+                => gen_ctc_ops.ctc_greedy_decoder(inputs, sequence_length, merge_repeated: merge_repeated, name: name);
+
+            /// <summary>
+            /// Computes dropout.
+            /// </summary>
+            /// <param name="x">A floating point tensor.</param>
+            /// <param name="keep_prob">(deprecated) A deprecated alias for `(1-rate)`.</param>
+            /// <param name="noise_shape">Shape for randomly generated keep/drop flags.</param>
+            /// <param name="seed">Used to create random seeds.</param>
+            /// <param name="name">A name for the operation (optional).</param>
+            /// <param name="rate">A scalar `Tensor` with the same type as `x`: the probability that each element is dropped.</param>
+            /// <returns>A Tensor of the same shape of `x`.</returns>
+            public Tensor dropout(Tensor x, Tensor keep_prob = null, Tensor noise_shape = null, int? seed = null, string name = null,
+                float? rate = null)
+            {
+                // Legacy keep_prob is translated to the v2 drop rate: rate = 1 - keep_prob.
+                Tensor keep = null;
+                if (keep_prob != null)
+                    keep = 1.0f - keep_prob;
+                // NOTE(review): if both rate and keep_prob are null, rate_tensor is null here;
+                // presumably nn_ops.dropout_v2 rejects a null rate — confirm.
+                var rate_tensor = rate.HasValue ? tf.constant(rate.Value) : keep;
+                return nn_ops.dropout_v2(x, rate: rate_tensor, noise_shape: noise_shape, seed: seed, name: name);
+            }
+
+            /// <summary>
+            /// Creates a recurrent neural network specified by RNNCell `cell`.
+            /// </summary>
+            /// <param name="cell">An instance of RNNCell.</param>
+            /// <param name="inputs">The RNN inputs.</param>
+            /// <returns>A pair (outputs, state).</returns>
+            public (Tensor, Tensor) dynamic_rnn(RnnCell cell, Tensor inputs,
+                Tensor sequence_length = null, TF_DataType dtype = TF_DataType.DtInvalid,
+                int? parallel_iterations = null, bool swap_memory = false, bool time_major = false)
+                => rnn.dynamic_rnn(cell, inputs, sequence_length: sequence_length, dtype: dtype,
+                    parallel_iterations: parallel_iterations, swap_memory: swap_memory,
+                    time_major: time_major);
+
+            /// <summary>Computes exponential linear units; delegates to gen_nn_ops.elu.</summary>
+            public Tensor elu(Tensor features, string name = null)
+                => gen_nn_ops.elu(features, name: name);
+
+            /// <summary>Calculates the mean and variance of `x` over `axes`; delegates to nn_impl.moments.</summary>
+            public (Tensor, Tensor) moments(Tensor x,
+                Axis axes,
+                string name = null,
+                bool keep_dims = false) => nn_impl.moments(x,
+                    axes,
+                    name: name,
+                    keep_dims: keep_dims);
+
+            /// <summary>Looks up `ids` in a variable of embeddings; delegates to embedding_ops.</summary>
+            public Tensor embedding_lookup(IVariableV1 @params,
+                Tensor ids,
+                string partition_strategy = "mod",
+                string name = null) => embedding_ops._embedding_lookup_and_transform(@params,
+                    ids,
+                    partition_strategy: partition_strategy,
+                    name: name);
+
+            /// <summary>Looks up `ids` in a single embedding tensor (wrapped as a one-element array).</summary>
+            public Tensor embedding_lookup(Tensor @params,
+                Tensor ids,
+                string partition_strategy = "mod",
+                string name = null) => embedding_ops._embedding_lookup_and_transform(new Tensor[] { @params },
+                    ids,
+                    partition_strategy: partition_strategy,
+                    name: name);
+
+            // Activation factories; the lowercase names below are activation classes, not these methods.
+            public IActivation relu() => new relu();
+
+            public IActivation swish() => new swish();
+            public IActivation tanh() => new tanh();
+
+            public IActivation softmax() => new softmax();
+
+            /// <summary>Computes hyperbolic tangent element-wise; delegates to gen_math_ops.tanh.</summary>
+            public Tensor tanh(Tensor x, string name = null)
+                => gen_math_ops.tanh(x, name);
+
+            /// <summary>Computes rectified linear: max(features, 0); delegates to gen_nn_ops.relu.</summary>
+            public Tensor relu(Tensor features, string name = null)
+                => gen_nn_ops.relu(features, name);
+
+            /// <summary>Computes rectified linear 6: min(max(features, 0), 6); delegates to gen_nn_ops.relu6.</summary>
+            public Tensor relu6(Tensor features, string name = null)
+                => gen_nn_ops.relu6(features, name);
+
+            /// <summary>Fused batch normalization; delegates to nn_impl.fused_batch_norm.</summary>
+            public Tensor[] fused_batch_norm(Tensor x,
+                Tensor scale,
+                Tensor offset,
+                Tensor mean = null,
+                Tensor variance = null,
+                float epsilon = 0.001f,
+                string data_format = "NHWC",
+                bool is_training = true,
+                string name = null,
+                float exponential_avg_factor = 1.0f) => nn_impl.fused_batch_norm(x, scale, offset, mean, variance,
+                    epsilon: epsilon,
+                    data_format: data_format,
+                    is_training: is_training,
+                    name: name,
+                    exponential_avg_factor: exponential_avg_factor);
+
+            /// <summary>
+            /// Normalizes a tensor by `mean` and `variance`, and applies (optionally) a `scale` \\(\gamma\\) to it, as well as an `offset` \\(\beta\\).
+            /// </summary>
+            /// <param name="x">A floating point tensor.</param>
+            /// <param name="mean">A mean `Tensor`.</param>
+            /// <param name="variance">A variance `Tensor`.</param>
+            /// <param name="offset">An offset `Tensor`, often denoted \\(\beta\\) in equations, or NULL. If present, will be added to the normalized tensor.</param>
+            /// <param name="scale">A scale `Tensor`, often denoted \\(\gamma\\) in equations, or NULL. If present, the scale is applied to the normalized tensor.</param>
+            /// <param name="variance_epsilon">A small float number to avoid dividing by 0.</param>
+            /// <param name="name">A name for this operation.</param>
+            /// <returns>the normalized, scaled, offset tensor.</returns>
+            public Tensor batch_normalization(Tensor x,
+                Tensor mean,
+                Tensor variance,
+                Tensor offset,
+                Tensor scale,
+                float variance_epsilon,
+                string name = null) => nn_impl.batch_normalization(x, mean, variance, offset, scale, variance_epsilon, name);
+
+            /// <summary>Performs max pooling on the input; delegates to nn_ops.max_pool.</summary>
+            public Tensor max_pool(Tensor value, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string name = null)
+                => nn_ops.max_pool(value, ksize, strides, padding, data_format: data_format, name: name);
+
+            /// <summary>Says whether the targets are in the top K predictions; delegates to nn_ops.in_top_k.</summary>
+            public Tensor in_top_k(Tensor predictions, Tensor targets, int k, string name = "InTopK")
+                => nn_ops.in_top_k(predictions, targets, k, name);
+
+            /// <summary>Finds values and indices of the k largest entries; delegates to gen_nn_ops.top_kv2.</summary>
+            public Tensor[] top_k(Tensor input, int k = 1, bool sorted = true, string name = null)
+                => gen_nn_ops.top_kv2(input, k: ops.convert_to_tensor(k), sorted: sorted, name: name);
+
+            /// <summary>Adds `bias` to `value` inside a "BiasAdd" name scope; delegates to gen_nn_ops.bias_add.</summary>
+            public Tensor bias_add(Tensor value, IVariableV1 bias, string data_format = null, string name = null)
+            {
+                return tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope =>
+                {
+                    name = scope;
+                    return gen_nn_ops.bias_add(value, ops.convert_to_tensor(bias), data_format: data_format, name: name);
+                });
+            }
+
+            /// <summary>Computes half the L2 norm of a tensor without the sqrt; delegates to nn_ops.l2_loss.</summary>
+            public Tensor l2_loss(Tensor t, string name = null)
+                => nn_ops.l2_loss(t, name: name);
+
+            /// <summary>
+            /// Local Response Normalization; delegates to gen_nn_ops.lrn.
+            /// </summary>
+            /// <param name="input">4-D input tensor.</param>
+            /// <param name="depth_radius">Half-width of the 1-D normalization window.</param>
+            /// <param name="bias">An offset (usually positive to avoid dividing by 0).</param>
+            /// <param name="alpha">A scale factor.</param>
+            /// <param name="beta">An exponent.</param>
+            /// <param name="name">A name for the operation (optional).</param>
+            public Tensor lrn(Tensor input, int depth_radius = 5, int bias = 1,
+                int alpha = 1, float beta = 0.5f, string name = null)
+                => gen_nn_ops.lrn(input, depth_radius: depth_radius, bias: bias,
+                    alpha: alpha, beta: beta, name: name);
+
+            /// <summary>Computes leaky rectified linear; delegates to nn_ops.leaky_relu.</summary>
+            public Tensor leaky_relu(Tensor features, float alpha = 0.2f, string name = null)
+                => nn_ops.leaky_relu(features, alpha: alpha, name: name);
+
+            // Expression-bodied: each access returns a fresh rnn_cell_impl.
+            public rnn_cell_impl rnn_cell => new rnn_cell_impl();
+
+            /// <summary>Computes sigmoid cross entropy given `logits`; delegates to nn_impl.</summary>
+            public Tensor sigmoid_cross_entropy_with_logits(Tensor labels, Tensor logits, string name = null)
+                => nn_impl.sigmoid_cross_entropy_with_logits(labels: labels, logits: logits, name: name);
+
+            /// <summary>Computes softmax activations.</summary>
+            // NOTE(review): `axis` is accepted but never forwarded — gen_nn_ops.softmax is called
+            // with (logits, name) only, so softmax is always over the last dimension. Confirm intended.
+            public Tensor softmax(Tensor logits, int axis = -1, string name = null)
+                => gen_nn_ops.softmax(logits, name);
+
+            /// <summary>
+            /// Computes sparse softmax cross entropy between `logits` and `labels`.
+            /// </summary>
+            public Tensor sparse_softmax_cross_entropy_with_logits(Tensor labels = null,
+                Tensor logits = null, string name = null)
+                => nn_ops.sparse_softmax_cross_entropy_with_logits(labels: labels, logits: logits, name: name);
+
+            /// <summary>
+            /// Computes softmax cross entropy between `logits` and `labels`.
+            /// Stops gradient flow through `labels`, then forwards to the v2 implementation.
+            /// </summary>
+            public Tensor softmax_cross_entropy_with_logits(Tensor labels, Tensor logits, int dim = -1, string name = null)
+            {
+                tf_with(ops.name_scope(name, "softmax_cross_entropy_with_logits_sg", new { logits, labels }), scope =>
+                {
+                    name = scope;
+                    labels = array_ops.stop_gradient(labels, name: "labels_stop_gradient");
+                });
+
+                return softmax_cross_entropy_with_logits_v2(labels, logits, axis: dim, name: name);
+            }
+
+            public Tensor softmax_cross_entropy_with_logits_v2(Tensor labels, Tensor logits, int axis = -1, string name = null)
+                => nn_ops.softmax_cross_entropy_with_logits_v2_helper(labels, logits, axis: axis, name: name);
+
+            /// <summary>
+            /// Computes sigmoid of `x` element-wise.
+            /// Specifically, `y = 1 / (1 + exp(-x))`.
+            /// </summary>
+            /// <typeparam name="T">Tensor-convertible input type (generic parameter restored; it was lost in extraction).</typeparam>
+            /// <param name="name">A name for the operation (optional).</param>
+            /// <returns>A Tensor with the same type as `x`.</returns>
+            public Tensor sigmoid<T>(T x, string name = null)
+                => math_ops.sigmoid(x, name: name);
+        }
+    }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.numpy.cs b/src/TensorFlowNET.Core/APIs/tf.numpy.cs
new file mode 100644
index 000000000..392ba915f
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.numpy.cs
@@ -0,0 +1,29 @@
+/*****************************************************************************
+ Copyright 2021 Haiping Chen. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.NumPy;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ /// <summary>
+ /// NumPy API on TensorFlow.
+ /// https://www.tensorflow.org/api_docs/python/tf/experimental/numpy
+ /// </summary>
+ /// <remarks>Expression-bodied property: every access constructs a new NumPyImpl instance.</remarks>
+ public NumPyImpl numpy => new NumPyImpl();
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.ops.cs b/src/TensorFlowNET.Core/APIs/tf.ops.cs
new file mode 100644
index 000000000..ebf35e3f9
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.ops.cs
@@ -0,0 +1,97 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System;
+using System.Collections.Generic;
+
+namespace Tensorflow
+{
+    public partial class tensorflow
+    {
+        /// <summary>Stores <paramref name="value"/> in the default graph's collection with the given name.</summary>
+        /// <typeparam name="T">Collection element type (generic parameter restored; it was lost in extraction).</typeparam>
+        public void add_to_collection<T>(string name, T value)
+            => get_default_graph().add_to_collection(name, value);
+
+        /// <summary>Stores <paramref name="value"/> in each of the default graph's collections named in <paramref name="names"/>.</summary>
+        public void add_to_collections<T>(List<string> names, T value)
+            => get_default_graph().add_to_collections(names, value);
+
+        /// <summary>Clips the tensors in `t_list` by their global norm; delegates to clip_ops.</summary>
+        public (Tensors, Tensor) clip_by_global_norm(Tensor[] t_list, float clip_norm, Tensor use_norm = null, string name = null)
+            => clip_ops.clip_by_global_norm(t_list, clip_norm, use_norm: use_norm, name: name);
+
+        /// <summary>Assigns `value` to `ref`; delegates to state_ops.assign.</summary>
+        public Tensor assign(IVariableV1 @ref, object value, bool validate_shape = true, bool use_locking = true, string name = null)
+            => state_ops.assign(@ref, value, validate_shape, use_locking, name);
+
+        /// <summary>Sets the device for ops created in the default graph.</summary>
+        public void device(string device_name)
+            => get_default_graph().device(device_name);
+
+        /// <summary>Returns the values in the default graph's collection with the given key.</summary>
+        public List<T> get_collection<T>(string key, string scope = "")
+            => get_default_graph().get_collection<T>(key, scope: scope);
+
+        /// <summary>
+        /// A context manager that lifts ops out of control-flow scopes and function-building graphs.
+        /// When eager execution is enabled, code inside an init_scope block runs with
+        /// eager execution enabled even when tracing a `tf.function`.
+        /// </summary>
+        public ops.NameScope init_scope()
+            => ops.init_scope();
+
+        /// <summary>
+        /// Returns a context manager that creates hierarchical names for operations.
+        /// </summary>
+        /// <param name="name">The name argument that is passed to the op function.</param>
+        /// <param name="default_name">The default name to use if the name argument is None.</param>
+        /// <param name="values">The list of Tensor arguments that are passed to the op function.</param>
+        /// <returns>The scope name.</returns>
+        public ops.NameScope name_scope(string name, string default_name = "", object values = null)
+            => new ops.NameScope(name, default_name, values);
+
+        /// <summary>
+        /// Does nothing. Only useful as a placeholder for control edges.
+        /// </summary>
+        public Operation no_op(string name = null)
+            => gen_control_flow_ops.no_op(name: name);
+
+        /// <summary>
+        /// map on the list of tensors unpacked from `elems` on dimension 0.
+        /// </summary>
+        /// <param name="fn">Callable applied to each unpacked element. Delegate type reconstructed
+        /// as Func&lt;Tensor, Tensor&gt; — confirm against Operation.map_fn (generics were lost in extraction).</param>
+        /// <returns>A tensor or (possibly nested) sequence of tensors.</returns>
+        public Tensor map_fn(Func<Tensor, Tensor> fn,
+            Tensor elems,
+            TF_DataType dtype = TF_DataType.DtInvalid,
+            int parallel_iterations = -1,
+            bool back_prop = true,
+            bool swap_memory = false,
+            bool infer_shape = true,
+            string name = null)
+            => Operation.map_fn(fn,
+                elems,
+                dtype,
+                parallel_iterations: parallel_iterations,
+                back_prop: back_prop,
+                swap_memory: swap_memory,
+                infer_shape: infer_shape,
+                name: name);
+    }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.queue.cs b/src/TensorFlowNET.Core/APIs/tf.queue.cs
new file mode 100644
index 000000000..a4757890e
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.queue.cs
@@ -0,0 +1,126 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.Queues;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ /// <summary>
+ /// A FIFOQueue that supports batching variable-sized tensors by padding.
+ /// </summary>
+ /// <param name="capacity">Upper bound on the number of elements that may be stored in the queue.</param>
+ /// <param name="dtypes">Data types for each queue component.</param>
+ /// <param name="shapes">Shapes for each queue component.</param>
+ /// <param name="names">Optional names for each queue component.</param>
+ /// <param name="shared_name">If non-null, the queue is shared under this name across sessions.</param>
+ /// <param name="name">A name for the queue operation.</param>
+ public PaddingFIFOQueue PaddingFIFOQueue(int capacity,
+ TF_DataType[] dtypes,
+ Shape[] shapes,
+ string[] names = null,
+ string shared_name = null,
+ string name = "padding_fifo_queue")
+ => new PaddingFIFOQueue(capacity,
+ dtypes,
+ shapes,
+ names,
+ shared_name: shared_name,
+ name: name);
+
+ /// <summary>Single-component convenience overload of <see cref="PaddingFIFOQueue(int, TF_DataType[], Shape[], string[], string, string)"/>.</summary>
+ public PaddingFIFOQueue PaddingFIFOQueue(int capacity,
+ TF_DataType dtype,
+ Shape shape,
+ string shared_name = null,
+ string name = "padding_fifo_queue")
+ => new PaddingFIFOQueue(capacity,
+ new[] { dtype },
+ new[] { shape },
+ shared_name: shared_name,
+ name: name);
+
+ /// <summary>
+ /// A queue implementation that dequeues elements in first-in first-out order.
+ /// </summary>
+ /// <param name="capacity">Upper bound on the number of elements that may be stored in the queue.</param>
+ /// <param name="dtypes">Data types for each queue component.</param>
+ /// <param name="shapes">Optional shapes for each queue component.</param>
+ /// <param name="names">Optional names for each queue component.</param>
+ /// <param name="shared_name">If non-null, the queue is shared under this name across sessions.</param>
+ /// <param name="name">A name for the queue operation.</param>
+ public FIFOQueue FIFOQueue(int capacity,
+ TF_DataType[] dtypes,
+ Shape[] shapes = null,
+ string[] names = null,
+ string shared_name = null,
+ string name = "fifo_queue")
+ => new FIFOQueue(capacity,
+ dtypes,
+ shapes,
+ names,
+ shared_name: shared_name,
+ name: name);
+
+ /// <summary>Single-component convenience overload; a null shape becomes Shape.Null.</summary>
+ public FIFOQueue FIFOQueue(int capacity,
+ TF_DataType dtype,
+ Shape shape = null,
+ string shared_name = null,
+ string name = "fifo_queue")
+ => new FIFOQueue(capacity,
+ new[] { dtype },
+ new[] { shape ?? Shape.Null },
+ shared_name: shared_name,
+ name: name);
+
+ /// <summary>
+ /// Creates a queue that dequeues elements in a first-in first-out order
+ /// according to element priority.
+ /// </summary>
+ public PriorityQueue PriorityQueue(int capacity,
+ TF_DataType dtype,
+ Shape shape = null,
+ string shared_name = null,
+ string name = "priority_queue")
+ => new PriorityQueue(capacity,
+ new[] { dtype },
+ new[] { shape ?? Shape.Null },
+ shared_name: shared_name,
+ name: name);
+
+ /// <summary>A queue that dequeues elements in a random order; keeps at least
+ /// <paramref name="min_after_dequeue"/> elements after each dequeue to ensure mixing.</summary>
+ public RandomShuffleQueue RandomShuffleQueue(int capacity,
+ int min_after_dequeue,
+ TF_DataType dtype,
+ Shape shape = null,
+ int? seed = null,
+ string shared_name = null,
+ string name = "random_shuffle_queue")
+ => new RandomShuffleQueue(capacity,
+ min_after_dequeue: min_after_dequeue,
+ new[] { dtype },
+ new[] { shape ?? Shape.Null },
+ seed: seed,
+ shared_name: shared_name,
+ name: name);
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.random.cs b/src/TensorFlowNET.Core/APIs/tf.random.cs
new file mode 100644
index 000000000..4f4962840
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.random.cs
@@ -0,0 +1,128 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ // Expression-bodied property: every access constructs a fresh Random wrapper instance.
+ public Random random => new Random();
+
+ // Nested class exposing tf.random.* ops. NOTE: the name shadows System.Random inside this scope.
+ public class Random
+ {
+ /// <summary>
+ /// Outputs random values from a normal distribution.
+ /// </summary>
+ /// <param name="shape">Shape of the output tensor.</param>
+ /// <param name="mean">Mean of the normal distribution.</param>
+ /// <param name="stddev">Standard deviation of the normal distribution.</param>
+ /// <param name="dtype">Output element type.</param>
+ /// <param name="seed">Used to create a random seed for the distribution.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ public Tensor normal(Shape shape,
+ float mean = 0.0f,
+ float stddev = 1.0f,
+ TF_DataType dtype = TF_DataType.TF_FLOAT,
+ int? seed = null,
+ string name = null) => random_ops.random_normal(shape, mean, stddev, dtype, seed, name);
+
+ /// <summary>Stateless (seedless-parameter) variant; delegates to stateless_random_ops.</summary>
+ public Tensor stateless_normal(Shape shape,
+ float mean = 0.0f,
+ float stddev = 1.0f,
+ TF_DataType dtype = TF_DataType.TF_FLOAT,
+ string name = null) => stateless_random_ops.stateless_random_normal(shape, mean, stddev, dtype, name: name);
+
+ /// <summary>
+ /// Outputs random values from a truncated normal distribution.
+ /// </summary>
+ /// <param name="shape">Shape of the output tensor.</param>
+ /// <param name="mean">Mean of the truncated normal distribution.</param>
+ /// <param name="stddev">Standard deviation of the truncated normal distribution.</param>
+ /// <param name="dtype">Output element type.</param>
+ /// <param name="seed">Used to create a random seed for the distribution.</param>
+ /// <param name="name">A name for the operation (optional).</param>
+ public Tensor truncated_normal(Shape shape,
+ float mean = 0.0f,
+ float stddev = 1.0f,
+ TF_DataType dtype = TF_DataType.TF_FLOAT,
+ int? seed = null,
+ string name = null) => random_ops.truncated_normal(shape, mean, stddev, dtype, seed, name);
+
+ /// <summary>Draws samples from a categorical distribution; delegates to random_ops.multinomial.</summary>
+ public Tensor categorical(
+ Tensor logits,
+ int num_samples,
+ int? seed = null,
+ string name = null,
+ TF_DataType output_dtype = TF_DataType.DtInvalid) => random_ops.multinomial(logits, num_samples, seed: seed, name: name, output_dtype: output_dtype);
+
+ /// <summary>
+ /// Outputs random values from a uniform distribution.
+ /// For integer dtypes, minval/maxval are truncated to int before delegating.
+ /// </summary>
+ public Tensor uniform(Shape shape,
+ float minval = 0,
+ float maxval = 1,
+ TF_DataType dtype = TF_DataType.TF_FLOAT,
+ int? seed = null,
+ string name = null)
+ {
+ if (dtype.is_integer())
+ return random_ops.random_uniform_int(shape, (int)minval, (int)maxval, seed, name);
+ else
+ return random_ops.random_uniform(shape, minval, maxval, dtype, seed, name);
+ }
+ }
+
+ /// <summary>Top-level alias for <c>random.uniform</c>.</summary>
+ public Tensor random_uniform(Shape shape,
+ float minval = 0,
+ float maxval = 1,
+ TF_DataType dtype = TF_DataType.TF_FLOAT,
+ int? seed = null,
+ string name = null)
+ => random.uniform(shape, minval: minval, maxval: maxval, dtype: dtype, seed: seed, name: name);
+
+ /// <summary>Top-level truncated normal; delegates directly to random_ops.truncated_normal.</summary>
+ public Tensor truncated_normal(Shape shape,
+ float mean = 0.0f,
+ float stddev = 1.0f,
+ TF_DataType dtype = TF_DataType.TF_FLOAT,
+ int? seed = null,
+ string name = null)
+ => random_ops.truncated_normal(shape, mean, stddev, dtype, seed, name);
+
+ /// <summary>
+ /// Randomly shuffles a tensor along its first dimension.
+ /// </summary>
+ /// <returns>
+ /// A tensor of same shape and type as value, shuffled along its
+ /// first dimension.
+ /// </returns>
+ public Tensor random_shuffle(Tensor value, int? seed = null, string name = null)
+ => random_ops.random_shuffle(value, seed: seed, name: name);
+
+ /// <summary>Sets the random seed: global context seed when eager, graph seed otherwise.</summary>
+ public void set_random_seed(int seed)
+ {
+ if (executing_eagerly())
+ Context.set_global_seed(seed);
+ else
+ ops.get_default_graph().seed = seed;
+ }
+
+ /// <summary>Draws samples from a multinomial distribution; delegates to random_ops.multinomial.</summary>
+ public Tensor multinomial(Tensor logits, int num_samples, int? seed = null,
+ string name = null, TF_DataType output_dtype = TF_DataType.DtInvalid)
+ => random_ops.multinomial(logits, num_samples, seed: seed,
+ name: name, output_dtype: output_dtype);
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.reduce_logsumexp.cs b/src/TensorFlowNET.Core/APIs/tf.reduce_logsumexp.cs
new file mode 100644
index 000000000..41f0ec45d
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.reduce_logsumexp.cs
@@ -0,0 +1,27 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ /// <summary>
+ /// Computes log(sum(exp(elements across dimensions of a tensor))); delegates to math_ops.reduce_logsumexp.
+ /// </summary>
+ public Tensor reduce_logsumexp(Tensor input_tensor,
+ Axis? axis = null,
+ bool keepdims = false,
+ string name = null) => math_ops.reduce_logsumexp(input_tensor, axis, keepdims, name);
+
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.reshape.cs b/src/TensorFlowNET.Core/APIs/tf.reshape.cs
new file mode 100644
index 000000000..102a81323
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.reshape.cs
@@ -0,0 +1,36 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ /// <summary>Reshapes a tensor to a static <c>Shape</c>; delegates to gen_array_ops.reshape.</summary>
+ public Tensor reshape(Tensor tensor,
+ Shape shape,
+ string name = null)
+ => gen_array_ops.reshape(tensor, shape, name);
+
+ /// <summary>Reshapes a tensor to a shape supplied as a tensor; delegates to gen_array_ops.reshape.</summary>
+ public Tensor reshape(Tensor tensor,
+ Tensor shape,
+ string name = null)
+ => gen_array_ops.reshape(tensor, shape, name);
+
+ /// <summary>Reshapes a tensor to a shape given as an object array; delegates to array_ops.reshape.</summary>
+ public Tensor reshape(Tensor tensor,
+ object[] shape,
+ string name = null)
+ => array_ops.reshape(tensor, shape, name);
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.saved_model.cs b/src/TensorFlowNET.Core/APIs/tf.saved_model.cs
new file mode 100644
index 000000000..ef6251ca8
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.saved_model.cs
@@ -0,0 +1,20 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+using Tensorflow.Train;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ /// <summary>Entry point for the SavedModel API (tf.saved_model).</summary>
+ public SavedModelAPI saved_model { get; } = new SavedModelAPI();
+ }
+
+ public class SavedModelAPI
+ {
+ /// <summary>
+ /// Loads a SavedModel from <paramref name="export_dir"/>; delegates to Loader.load.
+ /// </summary>
+ public Trackable load(string export_dir, LoadOptions? options = null)
+ {
+ return Loader.load(export_dir, options);
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.scan.cs b/src/TensorFlowNET.Core/APIs/tf.scan.cs
new file mode 100644
index 000000000..5642eaaf1
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.scan.cs
@@ -0,0 +1,35 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System;
+
+namespace Tensorflow
+{
+    public partial class tensorflow
+    {
+        /// <summary>
+        /// scan on the list of tensors unpacked from `elems` on dimension 0.
+        /// </summary>
+        /// <param name="fn">Callable applied as fn(accumulated, element). Delegate type reconstructed
+        /// as Func&lt;Tensor, Tensor, Tensor&gt; — confirm against functional_ops.scan (generics were lost in extraction).</param>
+        /// <param name="elems">Tensor unpacked along dimension 0.</param>
+        /// <param name="initializer">Initial accumulator value (defaults to the first element).</param>
+        public Tensor scan(
+            Func<Tensor, Tensor, Tensor> fn,
+            Tensor elems,
+            Tensor initializer = null,
+            int parallel_iterations = 10,
+            bool back_prop = true,
+            bool swap_memory = false,
+            bool infer_shape = true,
+            bool reverse = false,
+            string name = null) => functional_ops.scan(fn, elems, initializer, parallel_iterations, back_prop,
+                swap_memory, infer_shape, reverse, name);
+    }
+}
\ No newline at end of file
diff --git a/src/TensorFlowNET.Core/APIs/tf.signal.cs b/src/TensorFlowNET.Core/APIs/tf.signal.cs
new file mode 100644
index 000000000..2471124c5
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.signal.cs
@@ -0,0 +1,40 @@
+/*****************************************************************************
+ Copyright 2023 Konstantin Balashov All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.Operations;
+
+namespace Tensorflow
+{
+ public partial class tensorflow
+ {
+ public SignalApi signal { get; } = new SignalApi();
+ // Thin wrappers over the generated FFT ops (gen_ops.f_f_t* / i_f_f_t*).
+ public class SignalApi
+ {
+ /// <summary>Fast Fourier transform; delegates to gen_ops.f_f_t.</summary>
+ public Tensor fft(Tensor input, string name = null)
+ => gen_ops.f_f_t(input, name: name);
+ /// <summary>Inverse fast Fourier transform; delegates to gen_ops.i_f_f_t.</summary>
+ public Tensor ifft(Tensor input, string name = null)
+ => gen_ops.i_f_f_t(input, name: name);
+ /// <summary>2-D FFT; delegates to gen_ops.f_f_t2d.</summary>
+ public Tensor fft2d(Tensor input, string name = null)
+ => gen_ops.f_f_t2d(input, name: name);
+ /// <summary>2-D inverse FFT; delegates to gen_ops.i_f_f_t2d.</summary>
+ public Tensor ifft2d(Tensor input, string name = null)
+ => gen_ops.i_f_f_t2d(input, name: name);
+ /// <summary>3-D FFT; delegates to gen_ops.f_f_t3d.</summary>
+ public Tensor fft3d(Tensor input, string name = null)
+ => gen_ops.f_f_t3d(input, name: name);
+ /// <summary>3-D inverse FFT; delegates to gen_ops.i_f_f_t3d.</summary>
+ public Tensor ifft3d(Tensor input, string name = null)
+ => gen_ops.i_f_f_t3d(input, name: name);
+ }
+ }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.sparse.cs b/src/TensorFlowNET.Core/APIs/tf.sparse.cs
new file mode 100644
index 000000000..f124f6105
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.sparse.cs
@@ -0,0 +1,62 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System;
+using Tensorflow.Framework;
+
+namespace Tensorflow
+{
+    public partial class tensorflow
+    {
+        /// <summary>Constructs a SparseTensor from indices, values and dense shape.</summary>
+        public SparseTensor SparseTensor(long[,] indices, Array values, long[] dense_shape)
+            => new SparseTensor(indices, values, dense_shape);
+
+        /// <summary>Converts a SparseTensor into a dense tensor; delegates to gen_sparse_ops.sparse_to_dense.</summary>
+        public Tensor sparse_tensor_to_dense(SparseTensor sp_input,
+            Array default_value = default,
+            bool validate_indices = true,
+            string name = null)
+            => gen_sparse_ops.sparse_to_dense(sp_input.indices,
+                sp_input.dense_shape,
+                sp_input.values,
+                default_value: default_value,
+                validate_indices: validate_indices,
+                name: name);
+
+        /// <summary>
+        /// Converts a sparse representation into a dense tensor.
+        /// </summary>
+        /// <typeparam name="T">Element type of the sparse values (generic parameter restored; it was lost in extraction).</typeparam>
+        /// <param name="sparse_indices">Indices at which `sparse_values` are placed.</param>
+        /// <param name="output_shape">Shape of the dense output tensor.</param>
+        /// <param name="sparse_values">Values for each index in `sparse_indices`.</param>
+        /// <param name="default_value">Value for all other positions.</param>
+        /// <param name="validate_indices">Whether indices are checked for ordering and duplicates.</param>
+        /// <param name="name">A name for the operation (optional).</param>
+        /// <returns>Dense `Tensor` of shape `output_shape`. Has the same type as `sparse_values`.</returns>
+        public Tensor sparse_to_dense<T>(Tensor sparse_indices,
+            Shape output_shape,
+            T sparse_values,
+            T default_value = default,
+            bool validate_indices = true,
+            string name = null)
+            => gen_sparse_ops.sparse_to_dense(sparse_indices,
+                output_shape,
+                sparse_values,
+                default_value: default_value,
+                validate_indices: validate_indices,
+                name: name);
+    }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.state.cs b/src/TensorFlowNET.Core/APIs/tf.state.cs
new file mode 100644
index 000000000..d86f88b17
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.state.cs
@@ -0,0 +1,25 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+namespace Tensorflow
+{
+    public partial class tensorflow
+    {
+        /// <summary>
+        /// Updates <paramref name="@ref"/> by adding <paramref name="value"/> to it; delegates to state_ops.assign_add.
+        /// </summary>
+        /// <typeparam name="T">Type of the value to add (generic parameter restored; it was lost in extraction).</typeparam>
+        public ITensorOrOperation assign_add<T>(IVariableV1 @ref, T value,
+            bool use_locking = false, string name = null)
+            => state_ops.assign_add(@ref, value, use_locking: use_locking, name: name);
+    }
+}
diff --git a/src/TensorFlowNET.Core/APIs/tf.strings.cs b/src/TensorFlowNET.Core/APIs/tf.strings.cs
new file mode 100644
index 000000000..ecaf775d0
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.strings.cs
@@ -0,0 +1,95 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using static Tensorflow.Binding;
+
namespace Tensorflow
{
    public partial class tensorflow
    {
        public StringsApi strings { get; } = new StringsApi();

        /// <summary>
        /// Operations for working with string tensors (tf.strings.*).
        /// </summary>
        public class StringsApi
        {
            string_ops ops = new string_ops();

            /// <summary>
            /// Converts all uppercase characters into their respective lowercase replacements.
            /// </summary>
            /// <param name="input">String tensor to convert.</param>
            /// <param name="encoding">Character encoding of `input`; "" selects the default.</param>
            /// <param name="name">Optional name for the operation.</param>
            /// <returns>Lower-cased string tensor.</returns>
            public Tensor lower(Tensor input, string encoding = "", string name = null)
                => ops.lower(input: input, encoding: encoding, name: name);

            /// <summary>
            /// Replaces matches of <paramref name="pattern"/> in `input` with <paramref name="rewrite"/>.
            /// </summary>
            /// <param name="input">Source string tensor.</param>
            /// <param name="pattern">Regular expression to match.</param>
            /// <param name="rewrite">Replacement text.</param>
            /// <param name="replace_global">If true, replace every match; otherwise only the first.</param>
            /// <param name="name">Optional name for the operation.</param>
            public Tensor regex_replace(Tensor input, string pattern, string rewrite,
                bool replace_global = true, string name = null)
                => ops.regex_replace(input, pattern, rewrite,
                    replace_global: replace_global, name: name);

            /// <summary>
            /// Return substrings from `Tensor` of strings.
            /// </summary>
            /// <param name="input">String tensor.</param>
            /// <param name="pos">Start position of each substring.</param>
            /// <param name="len">Length of each substring.</param>
            /// <param name="name">Optional name for the operation.</param>
            /// <param name="uint">Unit in which pos/len are measured (e.g. "BYTE").</param>
            public Tensor substr(Tensor input, int pos, int len,
                string name = null, string @uint = "BYTE")
                => ops.substr(input, pos, len, @uint: @uint, name: name);

            /// <summary>
            /// Return a substring from a scalar string.
            /// </summary>
            public Tensor substr(string input, int pos, int len,
                string name = null, string @uint = "BYTE")
                => ops.substr(input, pos, len, @uint: @uint, name: name);

            /// <summary>
            /// String lengths of `input`.
            /// </summary>
            /// <param name="input">String tensor.</param>
            /// <param name="name">Optional name for the operation.</param>
            /// <param name="unit">"BYTE" for byte counts; other units per the underlying op.</param>
            public Tensor string_length(Tensor input, string name = null, string unit = "BYTE")
                => ops.string_length(input, name: name, unit: unit);

            /// <summary>
            /// Formats `inputs` into a single string, substituting each <paramref name="placeholder"/>
            /// occurrence in the template.
            /// </summary>
            public Tensor format(string template, Tensor[] inputs, string placeholder = "{}", int summarize = 3, string name = null)
                => ops.string_format(inputs, template: template, placeholder: placeholder, summarize: summarize, name: name);

            /// <summary>
            /// Splits each string into substrings separated by <paramref name="sep"/>.
            /// </summary>
            public RaggedTensor split(Tensor input, char sep = ' ', int maxsplit = -1, string name = null)
                => ops.string_split_v2(input, sep: sep.ToString(), maxsplit: maxsplit, name: name);

            /// <summary>
            /// Decodes each string into code points plus the byte offset of each character.
            /// </summary>
            public (RaggedTensor, RaggedTensor) unicode_decode_with_offsets(Tensor input, string input_encoding,
                string errors = "replace", int replacement_char = 0xFFFD,
                bool replace_control_characters = false, string name = null)
                => ops.unicode_decode_with_offsets(input, input_encoding, errors,
                    replacement_char: replacement_char,
                    replace_control_characters: replace_control_characters,
                    name: name);
        }
    }
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.summary.cs b/src/TensorFlowNET.Core/APIs/tf.summary.cs
new file mode 100644
index 000000000..4d0492b60
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.summary.cs
@@ -0,0 +1,26 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
namespace Tensorflow
{
    public partial class tensorflow
    {
        // Entry point for tf.summary.* operations.
        public Summaries.Summary summary = new Summaries.Summary();

        /// <summary>
        /// Outputs a `Summary` protocol buffer containing a single scalar value.
        /// </summary>
        /// <param name="name">Name for the generated summary node.</param>
        /// <param name="tensor">Scalar tensor containing the value to record.</param>
        public Tensor scalar(string name, Tensor tensor)
            => summary.scalar(name, tensor);
    }
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.tensor.cs b/src/TensorFlowNET.Core/APIs/tf.tensor.cs
new file mode 100644
index 000000000..b03168ab3
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.tensor.cs
@@ -0,0 +1,97 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.Operations;
+
namespace Tensorflow
{
    public partial class tensorflow
    {
        /// <summary>
        /// Converts the given value to a `Tensor`.
        /// </summary>
        public Tensor convert_to_tensor(object value, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, TF_DataType preferred_dtype = TF_DataType.DtInvalid)
            => ops.convert_to_tensor(value, dtype, name, preferred_dtype: preferred_dtype);

        /// <summary>
        /// Extracts a strided slice of a tensor; begin/end/strides are tensors.
        /// The mask arguments follow TensorFlow StridedSlice semantics.
        /// </summary>
        public Tensor strided_slice(Tensor input, Tensor begin, Tensor end, Tensor strides = null,
            int begin_mask = 0,
            int end_mask = 0,
            int ellipsis_mask = 0,
            int new_axis_mask = 0,
            int shrink_axis_mask = 0,
            string name = null) => gen_array_ops.strided_slice(input: input,
                begin: begin,
                end: end,
                strides: strides,
                begin_mask: begin_mask,
                end_mask: end_mask,
                ellipsis_mask: ellipsis_mask,
                new_axis_mask: new_axis_mask,
                shrink_axis_mask: shrink_axis_mask,
                name: name);

        /// <summary>
        /// Strided-slice overload taking begin/end/strides as arrays; each array is
        /// converted to a tensor before delegating.
        /// </summary>
        /// <typeparam name="T">Element type of the index arrays (e.g. int).</typeparam>
        public Tensor strided_slice<T>(Tensor input, T[] begin, T[] end, T[] strides = null,
            int begin_mask = 0,
            int end_mask = 0,
            int ellipsis_mask = 0,
            int new_axis_mask = 0,
            int shrink_axis_mask = 0,
            string name = null) => array_ops.strided_slice(input,
                begin: ops.convert_to_tensor(begin),
                end: ops.convert_to_tensor(end),
                strides: ops.convert_to_tensor(strides),
                begin_mask: begin_mask,
                end_mask: end_mask,
                ellipsis_mask: ellipsis_mask,
                new_axis_mask: new_axis_mask,
                shrink_axis_mask: shrink_axis_mask,
                name: name);

        /// <summary>
        /// Splits a tensor into sub tensors.
        /// </summary>
        /// <param name="value">The Tensor to split.</param>
        /// <param name="num_split">Number of splits along <paramref name="axis"/>; must evenly divide value.shape[axis].</param>
        /// <param name="axis">The dimension along which to split. Must be in the range [-rank(value), rank(value)). Defaults to 0.</param>
        /// <param name="name">A name for the operation (optional).</param>
        /// <returns>`num_split` Tensor objects resulting from splitting `value`.</returns>
        public Tensor[] split(Tensor value, int num_split, Axis axis, string name = null)
            => array_ops.split(
                value: value,
                num_or_size_splits: num_split,
                axis: axis,
                name: name);

        /// <summary>
        /// Splits a tensor into sub tensors with explicit sizes; the sum of sizes along
        /// the split dimension must match that of `value`.
        /// </summary>
        public Tensor[] split(Tensor value, int[] num_split, Axis axis, string name = null)
            => array_ops.split(
                value: value,
                num_or_size_splits: num_split,
                axis: axis,
                name: name);

        /// <summary>
        /// Adds a runtime check that `x`'s shape is compatible with `shape`; returns the checked tensor.
        /// </summary>
        public Tensor ensure_shape(Tensor x, Shape shape, string name = null)
        {
            return gen_ops.ensure_shape(x, shape, name);
        }
    }
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.tile.cs b/src/TensorFlowNET.Core/APIs/tf.tile.cs
new file mode 100644
index 000000000..a3b497e8a
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.tile.cs
@@ -0,0 +1,34 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+using static Tensorflow.Binding;
+
namespace Tensorflow
{
    public partial class tensorflow
    {
        /// <summary>
        /// Constructs a tensor by tiling `input` according to per-dimension repeat counts.
        /// </summary>
        /// <param name="input">Tensor to tile.</param>
        /// <param name="multiples">1-D tensor of repeat counts, one per dimension of `input`.</param>
        /// <param name="name">Optional name for the operation.</param>
        public Tensor tile(Tensor input, Tensor multiples, string name = null)
            => gen_array_ops.tile(input, multiples, name);

        /// <summary>
        /// Tile overload taking repeat counts as an object array; converted via shape_utils.
        /// </summary>
        public Tensor tile(Tensor input, object[] multiples, string name = null)
            => array_ops.tile(input, constant_op.constant(shape_utils.from_object_array(multiples).dims), name);

        /// <summary>
        /// Tile overload taking repeat counts as a Shape; converted to a constant tensor first.
        /// </summary>
        public Tensor tile(Tensor input, Shape multiples, string name = null)
        {
            var multiples_tensor = constant_op.constant(multiples);
            return gen_array_ops.tile(input, multiples_tensor, name);
        }
    }
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.train.cs b/src/TensorFlowNET.Core/APIs/tf.train.cs
new file mode 100644
index 000000000..cf02ed599
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.train.cs
@@ -0,0 +1,110 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System.Collections.Generic;
+using Tensorflow.Train;
+
namespace Tensorflow
{
    public partial class tensorflow
    {
        public train_internal train { get; } = new train_internal();

        /// <summary>
        /// Training helpers exposed as tf.train.* (optimizers, checkpointing, graph IO).
        /// </summary>
        public class train_internal
        {
            /// <summary>Creates the global step variable in <paramref name="graph"/>.</summary>
            public IVariableV1 create_global_step(Graph graph)
                => TrainingUtil.create_global_step(graph);

            /// <summary>Returns the existing global step variable of <paramref name="graph"/>.</summary>
            public IVariableV1 get_global_step(Graph graph)
                => TrainingUtil.get_global_step(graph);

            public Optimizer GradientDescentOptimizer(float learning_rate)
                => new GradientDescentOptimizer(learning_rate);

            public Optimizer GradientDescentOptimizer(Tensor learning_rate)
                => new GradientDescentOptimizer(learning_rate);

            public Optimizer AdamOptimizer(float learning_rate, float epsilon = 1e-8f, string name = "Adam")
                => new AdamOptimizer(learning_rate, epsilon: epsilon, name: name);

            public Optimizer AdamOptimizer(float learning_rate, TF_DataType dtype, string name = "Adam")
                => new AdamOptimizer(learning_rate, name: name, dtype: dtype);

            public Optimizer AdamOptimizer(IVariableV1 learning_rate, string name = "Adam")
                => new AdamOptimizer(learning_rate.AsTensor(), name: name);

            public Optimizer AdamOptimizer(Tensor learning_rate, string name = "Adam")
                => new AdamOptimizer(learning_rate, name: name);

            public ExponentialMovingAverage ExponentialMovingAverage(float decay)
                => new ExponentialMovingAverage(decay);

            public Saver Saver(IVariableV1[] var_list = null, int max_to_keep = 5)
                => new Saver(var_list: var_list, max_to_keep: max_to_keep);

            /// <summary>Writes `graph` to a file under <paramref name="logdir"/>; returns the path written.</summary>
            public string write_graph(Graph graph, string logdir, string name, bool as_text = true)
                => graph_io.write_graph(graph, logdir, name, as_text);

            /// <summary>Loads a frozen GraphDef (.pb) file into a new Graph.</summary>
            public Graph load_graph(string freeze_graph_pb)
                => saver.load_graph(freeze_graph_pb);

            /// <summary>Freezes a checkpoint into a single .pb containing the given output nodes.</summary>
            public string freeze_graph(string checkpoint_dir, string output_pb_name, string[] output_node_names)
                => saver.freeze_graph(checkpoint_dir, output_pb_name, output_node_names);

            /// <summary>Recreates the graph and returns a Saver from a MetaGraphDef file.</summary>
            public Saver import_meta_graph(string meta_graph_or_file,
                bool clear_devices = false,
                string import_scope = "") => saver._import_meta_graph_with_return_elements(meta_graph_or_file,
                    clear_devices,
                    import_scope).Item1;

            /// <summary>
            /// Exports the current graph as a MetaGraphDef, returning the proto together with
            /// the exported variables keyed by name.
            /// </summary>
            public (MetaGraphDef, Dictionary<string, IVariableV1>) export_meta_graph(string filename = "",
                bool as_text = false,
                bool clear_devices = false,
                bool clear_extraneous_savers = false,
                bool strip_default_attrs = false) => meta_graph.export_scoped_meta_graph(filename: filename,
                    as_text: as_text,
                    clear_devices: clear_devices,
                    clear_extraneous_savers: clear_extraneous_savers,
                    strip_default_attrs: strip_default_attrs);

            /// <summary>Finds the most recent checkpoint file in <paramref name="checkpoint_dir"/>.</summary>
            public string latest_checkpoint(string checkpoint_dir, string latest_filename = null)
                => checkpoint_management.latest_checkpoint(checkpoint_dir, latest_filename: latest_filename);

            /// <summary>Returns the CheckpointState proto for the directory, if present.</summary>
            public CheckpointState get_checkpoint_state(string checkpoint_dir, string latest_filename = null)
                => checkpoint_management.get_checkpoint_state(checkpoint_dir, latest_filename: latest_filename);
        }
    }
}
diff --git a/src/TensorFlowNET.Core/APIs/tf.variable.cs b/src/TensorFlowNET.Core/APIs/tf.variable.cs
new file mode 100644
index 000000000..9ce864bd8
--- /dev/null
+++ b/src/TensorFlowNET.Core/APIs/tf.variable.cs
@@ -0,0 +1,53 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System.Collections.Generic;
+using static Tensorflow.Binding;
+
namespace Tensorflow
{
    public partial class tensorflow
    {
        /// <summary>
        /// Returns all variables in the GLOBAL_VARIABLES collection, optionally filtered by scope.
        /// </summary>
        public IVariableV1[] global_variables(string scope = null)
        {
            return (ops.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope) as List<IVariableV1>)
                .ToArray();
        }

        /// <summary>
        /// Returns an Op that initializes a list of variables.
        /// </summary>
        /// <param name="var_list">List of `Variable` objects to initialize.</param>
        /// <param name="name">Optional name for the returned operation.</param>
        /// <returns>An Op that run the initializers of all the specified variables.</returns>
        public Operation variables_initializer(IVariableV1[] var_list, string name = "init")
            => variables.variables_initializer(var_list, name: name);

        public Operation global_variables_initializer()
            => tf.compat.v1.global_variables_initializer();

        /// <summary>
        /// Returns all variables created with `trainable=True`.
        /// </summary>
        /// <param name="scope">NOTE(review): currently ignored — all trainable variables are returned.</param>
        public IVariableV1[] trainable_variables(string scope = null)
            => (variables.trainable_variables() as List<IVariableV1>).ToArray();

        /// <summary>Returns the current variable scope.</summary>
        public VariableScope get_variable_scope()
            => Tensorflow.variable_scope.get_variable_scope();
    }
}
diff --git a/src/TensorFlowNET.Core/Assembly/Properties.cs b/src/TensorFlowNET.Core/Assembly/Properties.cs
new file mode 100644
index 000000000..290a72df0
--- /dev/null
+++ b/src/TensorFlowNET.Core/Assembly/Properties.cs
@@ -0,0 +1,4 @@
+using System.Runtime.CompilerServices;
+#if DEBUG
+[assembly: InternalsVisibleTo("Tensorflow.UnitTest, PublicKey=00240000048000009400000006020000002400005253413100040000010001004b86c4cb78549b34bab61a3b1800e23bfeb5b3ec390074041536a7e3cbd97f5f04cf0f857155a8928eaa29ebfd11cfbbad3ba70efea7bda3226c6a8d370a4cd303f714486b6ebc225985a638471e6ef571cc92a4613c00b8fa65d61ccee0cbe5f36330c9a01f4183559f1bef24cc2917c6d913e3a541333a1d05d9bed22b38cb")]
+#endif
diff --git a/src/TensorFlowNET.Core/Attributes/c_api.ops.cs b/src/TensorFlowNET.Core/Attributes/c_api.ops.cs
new file mode 100644
index 000000000..ba6f653a1
--- /dev/null
+++ b/src/TensorFlowNET.Core/Attributes/c_api.ops.cs
@@ -0,0 +1,122 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using System;
+using System.Runtime.InteropServices;
+
namespace Tensorflow
{
    public partial class c_api
    {
        /// <summary>
        /// Returns metadata (type, list length, total string size) about the value of the
        /// attribute `attr_name` of `oper`.
        /// </summary>
        /// <param name="oper">TF_Operation*</param>
        /// <param name="attr_name">const char*</param>
        /// <param name="status">TF_Status*</param>
        /// <returns>TF_AttrMetadata describing the attribute.</returns>
        [DllImport(TensorFlowLibName)]
        public static extern TF_AttrMetadata TF_OperationGetAttrMetadata(IntPtr oper, string attr_name, SafeStatusHandle status);

        /// <summary>
        /// Fills in `value` with the value of the attribute `attr_name`. `value` must
        /// point to an array of length at least `max_length` (ideally set to
        /// TF_AttrMetadata.total_size from TF_OperationGetAttrMetadata(oper, attr_name)).
        /// </summary>
        /// <param name="oper">TF_Operation*</param>
        /// <param name="attr_name">const char*</param>
        /// <param name="value">void*</param>
        /// <param name="max_length">size_t</param>
        /// <param name="status">TF_Status*</param>
        [DllImport(TensorFlowLibName)]
        public static extern void TF_OperationGetAttrString(IntPtr oper, string attr_name, IntPtr value, uint max_length, SafeStatusHandle status);

        /// <summary>
        /// Sets `output_attr_value` to the binary-serialized AttrValue proto
        /// representation of the value of the `attr_name` attr of `oper`.
        /// </summary>
        /// <param name="oper">TF_Operation*</param>
        /// <param name="attr_name">const char*</param>
        [DllImport(TensorFlowLibName)]
        public static extern int TF_OperationGetAttrValueProto(IntPtr oper, string attr_name, SafeBufferHandle output_attr_value, SafeStatusHandle status);

        // Scalar attribute getters: each writes the attribute's value through `value`.
        [DllImport(TensorFlowLibName)]
        public static extern void TF_OperationGetAttrType(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status);

        [DllImport(TensorFlowLibName)]
        public static extern void TF_OperationGetAttrInt(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status);

        [DllImport(TensorFlowLibName)]
        public static extern void TF_OperationGetAttrFloat(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status);

        [DllImport(TensorFlowLibName)]
        public static extern void TF_OperationGetAttrBool(IntPtr oper, string attr_name, IntPtr value, SafeStatusHandle status);

        /// <summary>
        /// Fills `value` (an array of length `num_dims`) with the dimensions of a shape attribute.
        /// </summary>
        [DllImport(TensorFlowLibName)]
        public static extern void TF_OperationGetAttrShape(IntPtr oper, string attr_name, long[] value, int num_dims, SafeStatusHandle status);

        [DllImport(TensorFlowLibName)]
        public static extern void TF_SetAttrBool(IntPtr desc, string attr_name, bool value);

        /// <summary>
        /// Sets an attribute from a binary-serialized AttrValue proto of `proto_len` bytes.
        /// </summary>
        [DllImport(TensorFlowLibName)]
        public static extern void TF_SetAttrValueProto(IntPtr desc, string attr_name, byte[] proto, ulong proto_len, SafeStatusHandle status);

        /// <summary>
        /// Set `num_dims` to -1 to represent "unknown rank".
        /// </summary>
        /// <param name="desc">TF_OperationDescription*</param>
        /// <param name="attr_name">const char*</param>
        /// <param name="dims">const int64_t*</param>
        /// <param name="num_dims">int</param>
        [DllImport(TensorFlowLibName)]
        public static extern void TF_SetAttrShape(IntPtr desc, string attr_name, long[] dims, int num_dims);

        /// <summary>
        /// Call some TF_SetAttr*() function for every attr that is not
        /// inferred from an input and doesn't have a default value you wish to
        /// keep.
        ///
        /// `value` must point to a string of length `length` bytes.
        /// </summary>
        /// <param name="desc">TF_OperationDescription*</param>
        /// <param name="attr_name">const char*</param>
        /// <param name="value">const void*</param>
        /// <param name="length">size_t</param>
        [DllImport(TensorFlowLibName)]
        public static extern void TF_SetAttrString(IntPtr desc, string attr_name, string value, uint length);

        /// <summary>
        /// Sets a list-of-strings attribute from parallel arrays of pointers and byte lengths.
        /// </summary>
        /// <param name="desc">TF_OperationDescription*</param>
        /// <param name="attr_name">const char*</param>
        /// <param name="values">const void* const* (one pointer per string)</param>
        /// <param name="lengths">const size_t* (byte length of each string)</param>
        /// <param name="num_values">int</param>
        [DllImport(TensorFlowLibName)]
        public static extern void TF_SetAttrStringList(IntPtr desc, string attr_name, IntPtr[] values, uint[] lengths, int num_values);

        [DllImport(TensorFlowLibName)]
        public static extern void TF_SetAttrTensor(IntPtr desc, string attr_name, SafeTensorHandle value, SafeStatusHandle status);

        [DllImport(TensorFlowLibName)]
        public static extern void TF_SetAttrType(IntPtr desc, string attr_name, TF_DataType value);
    }
}
diff --git a/src/TensorFlowNET.Core/Binding.FuncTools.cs b/src/TensorFlowNET.Core/Binding.FuncTools.cs
new file mode 100644
index 000000000..42a7b4ef9
--- /dev/null
+++ b/src/TensorFlowNET.Core/Binding.FuncTools.cs
@@ -0,0 +1,25 @@
+using System;
+
namespace Tensorflow
{
    public static partial class Binding
    {
        /// <summary>
        /// Minimal analogue of Python's functools module.
        /// </summary>
        public static class functools
        {
            /// <summary>
            /// Mimics functools.partial: packages <paramref name="func"/> together with a
            /// pre-bound argument.
            /// </summary>
            public static PartialFunc<Tin, Tout> partial<Tin, Tout>(Func<Tin, Tout> func, Tin arg)
                => new PartialFunc<Tin, Tout>
                {
                    args = arg,
                    invoke = func
                };
        }

        /// <summary>
        /// A delegate together with its pre-bound argument (and optional keyword args).
        /// </summary>
        public class PartialFunc<Tin, Tout>
        {
            // The positional argument bound by functools.partial.
            public Tin args { get; set; }
            // Keyword arguments; unused by partial(), defaults to null.
            public object[] keywords { get; set; }
            // The underlying delegate to invoke.
            public Func<Tin, Tout> invoke { get; set; }
        }
    }
}
diff --git a/src/TensorFlowNET.Core/Binding.Util.cs b/src/TensorFlowNET.Core/Binding.Util.cs
new file mode 100644
index 000000000..99ed5c1f3
--- /dev/null
+++ b/src/TensorFlowNET.Core/Binding.Util.cs
@@ -0,0 +1,537 @@
+/*****************************************************************************
+ Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+******************************************************************************/
+
+using Tensorflow.NumPy;
+using System;
+using System.Collections;
+using System.Collections.Generic;
+using System.ComponentModel;
+using System.Diagnostics;
+using System.IO;
+using System.Linq;
+using Tensorflow.Operations;
+
+namespace Tensorflow
+{
+ ///
+ /// Binding utilities to mimic python functions.
+ ///
+ public static partial class Binding
+ {
+ public static T2 get(this Dictionary dict, T1 key)
+ => key == null ?
+ default :
+ (dict.ContainsKey(key) ? dict[key] : default);
+
+ public static void Update(this IList list, T element)
+ {
+ var index = list.IndexOf(element);
+ if (index < 0)
+ list.Add(element);
+ else
+ {
+ list[index] = element;
+ }
+ }
+
+ public static void difference_update(this IList list, IList list2)
+ {
+ foreach(var el in list2)
+ {
+ if (list.Contains(el))
+ list.Remove(el);
+ }
+ }
+
+ public static void add(this IList list, T element)
+ => list.Add(element);
+
+ public static void add(this IList list, IEnumerable elements)
+ {
+ foreach (var ele in elements)
+ list.Add(ele);
+ }
+
+ public static void append(this IList list, T element)
+ => list.Insert(list.Count, element);
+
+ public static void append(this IList list, IList elements)
+ {
+ for (int i = 0; i < elements.Count(); i++)
+ list.Insert(list.Count, elements[i]);
+ }
+
+ public static T[] concat(this IList list1, IList list2)
+ {
+ var list = new List();
+ list.AddRange(list1);
+ list.AddRange(list2);
+ return list.ToArray();
+ }
+
+ public static void extend(this List list, IEnumerable elements)
+ => list.AddRange(elements);
+
+ private static string _tostring(object obj)
+ {
+ switch (obj)
+ {
+ case NDArray nd:
+ return nd.ToString();
+ /*case Array arr:
+ if (arr.Rank != 1 || arr.GetType().GetElementType()?.IsArray == true)
+ arr = Arrays.Flatten(arr);
+ var objs = toObjectArray(arr);
+ return $"[{string.Join(", ", objs.Select(_tostring))}]";*/
+ default:
+ return obj?.ToString() ?? "null";
+ }
+ }
+
+ private static TextWriter _writer = Console.Out;
+
+ public static TextWriter tf_output_redirect {
+ set
+ {
+ if(_writer != null)
+ {
+ _writer.Flush();
+ if (_writer is StringWriter sw)
+ sw.GetStringBuilder().Clear();
+ }
+
+ _writer = value;
+ }
+ get => _writer ?? Console.Out;
+ }
+
+ public static void print(object obj)
+ {
+ tf_output_redirect.WriteLine(_tostring(obj));
+ }
+
+ public static void print(string format, params object[] objects)
+ {
+ if (!format.Contains("{}"))
+ {
+ tf_output_redirect.WriteLine(format + " " + string.Join(" ", objects.Select(x => x.ToString())));
+ return;
+ }
+
+ foreach (var obj in objects)
+ {
+
+ }
+
+ tf_output_redirect.WriteLine(format);
+ }
+
+ public static int len(object a)
+ {
+ switch (a)
+ {
+ case Tensor tensor:
+ return (int)tensor.shape[0];
+ case Tensors arr:
+ return arr.Length;
+ case Array arr:
+ return arr.Length;
+ case IList arr:
+ return arr.Count;
+ case ICollection arr:
+ return arr.Count;
+ case IEnumerable enumerable:
+ return enumerable.OfType