@@ -332,3 +332,7 @@ src/TensorFlowNET.Native/bazel-*
 src/TensorFlowNET.Native/c_api.h
 /.vscode
 test/TensorFlowNET.Examples/mnist
+# training model resources
+.resources
@@ -28,8 +28,14 @@ In comparison to other projects, like for instance TensorFlowSharp which only pr
 Install TF.NET and TensorFlow binary through NuGet.
 ```sh
+### Install the TensorFlow C# binding
 PM> Install-Package TensorFlow.NET
+### Install the TensorFlow binary
+### For the CPU version
 PM> Install-Package SciSharp.TensorFlow.Redist
+### For the GPU version (CUDA and cuDNN are required)
+PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU
 ```
 Import TF.NET.
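For context, the README's next step typically shows the import and a first run. A minimal sketch, assuming the `Tensorflow` namespace and the static `tf` entry point exposed by TF.NET at this point in its history (the exact API surface is an assumption):

```cs
using System;
using Tensorflow; // exposes the static `tf` API entry point

class HelloTensorFlow
{
    static void Main()
    {
        // Build a tiny graph: c = a + b
        var a = tf.constant(2f);
        var b = tf.constant(3f);
        var c = a + b;

        // Run it in a session; Session is IDisposable, so `using` releases the native handle.
        using (var sess = tf.Session())
        {
            var result = sess.run(c);
            Console.WriteLine(result); // 5
        }
    }
}
```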
@@ -17,7 +17,11 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowBenchmark", "src\
 EndProject
 Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowHub", "src\TensorFlowHub\TensorFlowHub.csproj", "{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}"
 EndProject
-Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowText", "src\TensorFlowText\TensorFlowText.csproj", "{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}"
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowText", "src\TensorFlowText\TensorFlowText.csproj", "{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}"
+EndProject
+Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowDatasets", "src\TensorFlowDatasets\TensorFlowDatasets.csproj", "{DF151A51-E9FD-41BD-B0F4-08A743755D44}"
+EndProject
+Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Examples.GPU", "test\TensorFlowNET.Examples\TensorFlowNET.Examples.GPU.csproj", "{6F6B3382-8F87-4CD9-BF87-C81D5405685A}"
 EndProject
 Global
 GlobalSection(SolutionConfigurationPlatforms) = preSolution
@@ -57,6 +61,14 @@ Global
 {B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Debug|Any CPU.Build.0 = Debug|Any CPU
 {B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Release|Any CPU.ActiveCfg = Release|Any CPU
 {B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Release|Any CPU.Build.0 = Release|Any CPU
+{DF151A51-E9FD-41BD-B0F4-08A743755D44}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+{DF151A51-E9FD-41BD-B0F4-08A743755D44}.Debug|Any CPU.Build.0 = Debug|Any CPU
+{DF151A51-E9FD-41BD-B0F4-08A743755D44}.Release|Any CPU.ActiveCfg = Release|Any CPU
+{DF151A51-E9FD-41BD-B0F4-08A743755D44}.Release|Any CPU.Build.0 = Release|Any CPU
+{6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
+{6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Debug|Any CPU.Build.0 = Debug|Any CPU
+{6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Release|Any CPU.ActiveCfg = Release|Any CPU
+{6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Release|Any CPU.Build.0 = Release|Any CPU
 EndGlobalSection
 GlobalSection(SolutionProperties) = preSolution
 HideSolutionNode = FALSE
@@ -115,7 +115,7 @@ namespace Keras
 var init = tf.global_variables_initializer();
 float loss_value = 0;
-with(tf.Session(graph), sess =>
+using (var sess = tf.Session(graph))
 {
 sess.run(init);
 var step = 0;
@@ -133,7 +133,7 @@ namespace Keras
 Console.WriteLine($"Step {step} loss: {loss_value}");
 }
 Console.WriteLine($"Final loss: {loss_value}");
-});
+}
 return loss_value;
 }
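The two hunks above replace the Python-style `with(...)` helper with C#'s native `using` statement; since the session is disposable, the compiler guarantees cleanup without a lambda wrapper. A hedged sketch of the before/after shapes (the surrounding method is illustrative only):

```cs
using Tensorflow;
using static Tensorflow.Python; // provides the `with` helper being phased out

class SessionStyles
{
    static void Run(Graph graph, Operation init)
    {
        // Before: Python-style context manager, session handed to a lambda.
        with(tf.Session(graph), sess => sess.run(init));

        // After: idiomatic C#. The session's native handle is released
        // even if the body throws.
        using (var sess = tf.Session(graph))
        {
            sess.run(init);
        }
    }
}
```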
@@ -1,8 +1,14 @@
 ## SciSharp.TensorFlow.Redist ##
-`SciSharp.TensorFlow.Redist` is a migration from [Microsoft.ML.TensorFlow.Redist](https://github.com/dotnet/machinelearning/tree/release/1.2/src/Redist/Microsoft.ML.TensorFlow.Redist). [ML.NET](https://github.com/dotnet/machinelearning) team will not maintain the package since [ML.NET](https://www.nuget.org/packages/Microsoft.ML) v1.4.0 going forward.
+`SciSharp.TensorFlow.Redist` is a migration from [Microsoft.ML.TensorFlow.Redist](https://github.com/dotnet/machinelearning/tree/release/1.2/src/Redist/Microsoft.ML.TensorFlow.Redist). The [ML.NET](https://github.com/dotnet/machinelearning) team will no longer maintain the package from [ML.NET](https://www.nuget.org/packages/Microsoft.ML) v1.3.0 onward.
+* CPU version for all platforms (Windows, Linux, OSX)
+```powershell
+PM> Install-Package SciSharp.TensorFlow.Redist
+```
+* GPU version for Windows
 ```powershell
-PM> Install-Package SciSharp.TensorFlow.Redist
+PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU
 ```
@@ -16,7 +22,7 @@ Related merged [commits](https://github.com/SciSharp/TensorFlow.NET/commit/854a5
 On Windows, the tar command does not support extracting archives with symlinks. So when `dotnet pack` runs on Windows it will only package the Windows binaries.
 1. Run `dotnet pack` under `src/SciSharp.TensorFlow.Redist` directory in Linux.
-2. Run `nuget push SciSharp.TensorFlow.Redist.1.14.0.nupkg -k APIKEY -s https://api.nuget.org/v3/index.json`
+2. Run `dotnet nuget push SciSharp.TensorFlow.Redist.1.14.0.nupkg -k APIKEY -s https://api.nuget.org/v3/index.json`
@@ -9,7 +9,7 @@
 <license type="file">LICENSE.txt</license>
 <licenseUrl>https://aka.ms/deprecateLicenseUrl</licenseUrl>
 <projectUrl>https://www.tensorflow.org/</projectUrl>
-<description>$packageId$ contains the TensorFlow C library version $version$ redistributed as a NuGet package.</description>
+<description>$packageId$ contains the TensorFlow C library CPU version $version$ redistributed as a NuGet package.</description>
 <releaseNotes>https://github.com/tensorflow/tensorflow/releases/tag/v$version$</releaseNotes>
 <copyright>Copyright 2019 The TensorFlow Authors. All rights reserved.</copyright>
 <tags>TensorFlow</tags>
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="utf-8"?>
+<package xmlns="http://schemas.microsoft.com/packaging/2012/06/nuspec.xsd">
+<metadata>
+<id>$packageId$</id>
+<version>$version$</version>
+<authors>The TensorFlow Authors</authors>
+<owners>The TensorFlow Authors</owners>
+<requireLicenseAcceptance>true</requireLicenseAcceptance>
+<license type="file">LICENSE.txt</license>
+<licenseUrl>https://aka.ms/deprecateLicenseUrl</licenseUrl>
+<projectUrl>https://www.tensorflow.org/</projectUrl>
+<description>$packageId$ contains the TensorFlow C library GPU version $version$ redistributed as a NuGet package.</description>
+<releaseNotes>https://github.com/tensorflow/tensorflow/releases/tag/v$version$</releaseNotes>
+<copyright>Copyright 2019 The TensorFlow Authors. All rights reserved.</copyright>
+<tags>TensorFlow</tags>
+<dependencies>
+<group targetFramework=".NETStandard2.0" />
+</dependencies>
+</metadata>
+<files>
+<file src="CommonPackage.props" target="build\netstandard2.0\$packageId$.props" />
+<file src="bin\packages\$packageId$\LICENSE.txt" target="LICENSE.txt" />
+<file src="bin\packages\$packageId$\THIRD_PARTY_NOTICES.txt" target="THIRD_PARTY_NOTICES.txt" />
+<file src="bin\packages\$packageId$\runtimes\**\*" target="runtimes" />
+</files>
+</package>
@@ -17,7 +17,7 @@
 <NoBuild>true</NoBuild>
 <IncludeBuildOutput>false</IncludeBuildOutput>
-<NuspecFile>Redist.nuspec</NuspecFile>
+<NuspecFile>Redist-CPU.nuspec</NuspecFile>
 <NuspecProperties>packageId=$(PackageId);version=$(PackageVersion)</NuspecProperties>
 <NuspecBasePath>$(ProjDir)</NuspecBasePath>
@@ -0,0 +1,187 @@
+<Project Sdk="Microsoft.NET.Sdk">
+<PropertyGroup>
+<ProjDir>$(MSBuildThisFileDirectory)</ProjDir>
+<BinDir>$(ProjDir)bin\</BinDir>
+<ObjDir>$(ProjDir)obj\</ObjDir>
+<TargetArchitecture Condition="'$(TargetArchitecture)' == ''">x64</TargetArchitecture>
+<TargetFramework>netstandard2.0</TargetFramework>
+<TensorFlowVersion>1.14.0</TensorFlowVersion>
+<TensorFlowMajorVersion>1</TensorFlowMajorVersion>
+<PackageAssetsPath>$(BinDir)packages\</PackageAssetsPath>
+<PackageId>$(MSBuildProjectName)</PackageId>
+<PackageVersion>$(TensorFlowVersion)</PackageVersion>
+<NoBuild>true</NoBuild>
+<IncludeBuildOutput>false</IncludeBuildOutput>
+<NuspecFile>Redist-Windows-GPU.nuspec</NuspecFile>
+<NuspecProperties>packageId=$(PackageId);version=$(PackageVersion)</NuspecProperties>
+<NuspecBasePath>$(ProjDir)</NuspecBasePath>
+<GenerateNuspecDependsOn>CopyFilesFromArchive</GenerateNuspecDependsOn>
+<PackageRid Condition="'$(OS)' == 'Windows_NT'">win</PackageRid>
+<PackageRid Condition="'$(OS)' != 'Windows_NT'">linux</PackageRid>
+<PackageRid Condition="$([MSBuild]::IsOSPlatform('osx'))">osx</PackageRid>
+<PackageRid>$(PackageRid)-$(TargetArchitecture)</PackageRid>
+</PropertyGroup>
+<PropertyGroup>
+<IncludeMLNetNotices>false</IncludeMLNetNotices>
+</PropertyGroup>
+<ItemGroup>
+<TensorFlowConfig Include="windows"
+FileExtension=".zip"
+FilesFromArchive="lib\tensorflow.dll;
+include\tensorflow\c\LICENSE"
+Runtime="win-x64"/>
+<TensorFlowConfig Condition="'$(OS)' != 'Windows_NT'"
+Include="linux"
+FileExtension=".tar.gz"
+FilesFromArchive="lib\libtensorflow.so;
+lib\libtensorflow_framework.so.$(TensorFlowMajorVersion);
+include\tensorflow\c\LICENSE"
+Runtime="linux-x64" />
+<TensorFlowConfig Condition="'$(OS)' != 'Windows_NT'"
+Include="darwin" FileExtension=".tar.gz"
+FilesFromArchive="lib\libtensorflow.dylib;
+lib\libtensorflow_framework.$(TensorFlowMajorVersion).dylib;
+include\tensorflow\c\LICENSE"
+Runtime="osx-x64" />
+<AdditionalDownloadFile Include="https://raw.githubusercontent.com/tensorflow/tensorflow/master/LICENSE"
+DownloadFile="$(BinDir)LICENSE" />
+</ItemGroup>
+<Target Name="PrepareArchives">
+<ItemGroup>
+<!-- although we could extract all archives on all machines, mac requires a fixup which can only be run on mac
+so we split these per-rid and join during the official build packaging. -->
+<TensorFlowArchive
+Include="@(TensorFlowConfig->'https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-%(Identity)-x86_64-$(TensorFlowVersion)%(FileExtension)')" />
+<!-- set up metadata used by all targets -->
+<TensorFlowArchive DownloadFile="$(BinDir)%(FileName)%(Extension)"
+DownloadShaFile="$(BinDir)%(FileName)%(Extension).sha"
+ExtractDirectory="$(BinDir)%(FileName)"
+ExtractSemaphore="$(BinDir)%(FileName)\.extracted"
+LocalShaFile="$(MSBuildProjectDirectory)\%(FileName)%(Extension).sha"/>
+</ItemGroup>
+<Message Importance="high" Text="%(TensorFlowConfig.Runtime)"/>
+</Target>
+<Target Name="DownloadArchives"
+DependsOnTargets="PrepareArchives"
+Inputs="$(MSBuildProjectFile)"
+Outputs="@(TensorFlowArchive->'%(DownloadFile)');@(AdditionalDownloadFile->'%(DownloadFile)')">
+<MakeDir Directories="$(BinDir)" />
+<ItemGroup>
+<_downloadFiles Include="@(TensorFlowArchive);@(AdditionalDownloadFile)" Url="%(Identity)" DestinationFile="%(DownloadFile)" />
+</ItemGroup>
+<Message Importance="High" Text="Downloading '%(_downloadFiles.Identity)' to '$(BinDir)'." />
+<DownloadFile SourceUrl="%(_downloadFiles.Identity)" DestinationFolder="$(BinDir)">
+<Output TaskParameter="DownloadedFile" ItemName="Content" />
+</DownloadFile>
+</Target>
+<Target Name="ValidateAndExtractArchives"
+DependsOnTargets="DownloadArchives"
+Inputs="@(TensorFlowArchive->'%(DownloadFile)')"
+Outputs="@(TensorFlowArchive->'%(ExtractSemaphore)')">
+<GetFileHash Files="@(TensorFlowArchive->'%(DownloadFile)')" Algorithm="SHA512">
+<Output
+TaskParameter="Items"
+ItemName="FilesWithHashes" />
+</GetFileHash>
+<WriteLinesToFile File="%(FilesWithHashes.Identity).sha" Lines="%(FilesWithHashes.FileHash)" Overwrite="true"/>
+<!-- If specified we'll update the checked in SHAs with the downloaded ones. -->
+<Copy Condition="'$(UpdateSHA)' == 'true'"
+SourceFiles="@(TensorFlowArchive->'%(DownloadShaFile)')"
+DestinationFiles="@(TensorFlowArchive->'%(LocalShaFile)')" />
+<ItemGroup>
+<TensorFlowArchive>
+<DownloadSha>@(FilesWithHashes->'%(FileHash)')</DownloadSha>
+<LocalSha>$([System.IO.File]::ReadAllText('%(LocalShaFile)').Replace("%0A", "").Replace("%0D", ""))</LocalSha>
+</TensorFlowArchive>
+</ItemGroup>
+<Error Condition="!Exists('%(TensorFlowArchive.LocalShaFile)')" Text="SHA file '%(TensorFlowArchive.LocalShaFile)' does not exist. Build with /p:UpdateSHA=true to save it." />
+<Message Importance="High" Text="@(TensorFlowArchive->'%(DownloadFile) - %(LocalSha) - %(DownloadSha)')"/>
+<!-- Validate that the downloaded SHAs match the expected checked-in SHAs -->
+<Error Condition="'%(TensorFlowArchive.LocalSha)' != '%(TensorFlowArchive.DownloadSha)'" Text="Downloaded file '%(TensorFlowArchive.DownloadFile)' has unexpected SHA.%0A expected: %(TensorFlowArchive.LocalSha)%0A --actual: %(TensorFlowArchive.DownloadSha)%0ABuild with /p:UpdateSHA=true if you intentionally changed the URL and wish to update the SHAs; otherwise this could indicate an incomplete download or an intercepted URL and should be examined." />
+<!-- The archives are valid; let's extract them, ensuring an empty directory -->
+<RemoveDir Directories="@(TensorFlowArchive->'%(ExtractDirectory)')" />
+<MakeDir Directories="@(TensorFlowArchive->'%(ExtractDirectory)')" />
+<Message Importance="High" Text="Decompressing '%(TensorFlowArchive.DownloadFile)' to '%(TensorFlowArchive.ExtractDirectory)'." />
+<Unzip Condition="'%(TensorFlowArchive.FileExtension)' == '.zip'"
+SourceFiles="%(TensorFlowArchive.DownloadFile)"
+DestinationFolder="%(TensorFlowArchive.ExtractDirectory)" />
+<Exec Condition="'$(OS)' != 'Windows_NT' AND '%(TensorFlowArchive.FileExtension)' == '.tar.gz'"
+WorkingDirectory="$(MSBuildThisFileDirectory)"
+Command="tar -xzm --hard-dereference -f %(TensorFlowArchive.DownloadFile) -C %(TensorFlowArchive.ExtractDirectory)" />
+<Exec Condition="'$(OS)' != 'Windows_NT'"
+Command="chmod -R +w %(TensorFlowArchive.ExtractDirectory)" />
+<Touch Files="@(TensorFlowArchive->'%(ExtractSemaphore)')" AlwaysCreate="true" />
+</Target>
+<!-- Select the files we want to copy out of each archive. -->
+<Target Name="GetFilesFromArchive"
+DependsOnTargets="ValidateAndExtractArchives" >
+<ItemGroup>
+<!-- batch rather than transform so that we can split FilesFromArchive metadata -->
+<_fileFromArchive Include="%(TensorFlowArchive.FilesFromArchive)" ExtractDirectory="%(TensorFlowArchive.ExtractDirectory)" Runtime="%(TensorFlowArchive.Runtime)" />
+<_fileFromArchive DestinationFile="%(FileName)%(Extension)"/>
+<_fileFromArchive PackagePath="runtimes\%(_fileFromArchive.Runtime)\native\%(_fileFromArchive.DestinationFile)" />
+<!-- LICENSE from the package is actually THIRD_PARTY_NOTICES-->
+<_fileFromArchive Condition="'%(DestinationFile)' == 'LICENSE'" PackagePath="THIRD_PARTY_NOTICES.txt" Runtime="" />
+<!-- copy to packaging location -->
+<FilesFromArchive Include="@(_fileFromArchive->'%(ExtractDirectory)\%(Identity)')"
+TargetPath="$(PackageAssetsPath)$(MSBuildProjectName)\%(PackagePath)" />
+<!-- include LICENSE that was downloaded from GitHub -->
+<FilesFromArchive Include="$(BinDir)\LICENSE"
+TargetPath="$(PackageAssetsPath)$(MSBuildProjectName)\LICENSE.txt" />
+<!-- copy to NativeAssets location, only for current RID, so that they may be used by tests -->
+<!--<FilesFromArchive Condition="'$(PackageRID)' == '%(_fileFromArchive.Runtime)'"
+Include="@(_fileFromArchive->'%(ExtractDirectory)\%(Identity)')"
+TargetPath="$(NativeAssetsBuiltPath)\%(_fileFromArchive.DestinationFile)" />-->
+</ItemGroup>
+</Target>
+<Target Name="CopyFilesFromArchive"
+DependsOnTargets="GetFilesFromArchive">
+<Message Importance="High" Text="@(FilesFromArchive) -> %(FilesFromArchive.TargetPath)" />
+<Copy SourceFiles="@(FilesFromArchive)"
+DestinationFiles="@(FilesFromArchive->'%(TargetPath)')" />
+</Target>
+<Target Name="Clean">
+<Message Importance="High" Text="Deleting $(BinDir);$(ObjDir)" />
+<RemoveDir Directories="$(BinDir);$(ObjDir)" />
+</Target>
+</Project>
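The ValidateAndExtractArchives target above re-hashes each downloaded archive and compares it with the checked-in `.sha` file before extracting. A standalone C# sketch of the same check (the file paths are hypothetical):

```cs
using System;
using System.IO;
using System.Security.Cryptography;

class ArchiveShaCheck
{
    // Hash the downloaded archive with SHA-512 and compare it against the
    // checked-in .sha file, mirroring the MSBuild target's validation step.
    static bool IsValid(string archivePath, string localShaPath)
    {
        using (var sha = SHA512.Create())
        using (var stream = File.OpenRead(archivePath))
        {
            var actual = BitConverter.ToString(sha.ComputeHash(stream)).Replace("-", "");
            var expected = File.ReadAllText(localShaPath).Trim(); // strip the trailing newline
            return string.Equals(actual, expected, StringComparison.OrdinalIgnoreCase);
        }
    }

    static void Main(string[] args)
        => Console.WriteLine(IsValid(args[0], args[1])
            ? "SHA matches."
            : "SHA mismatch - incomplete download or intercepted URL?");
}
```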
@@ -0,0 +1 @@
+850A27858FA951DF77A78CD1BD78B54F6EE2532DD5A49F0579A7B02C795C62F0212F20177EAEA2BD77BD451A57FBBD1348362492F9E14BFE5CA5028C71711293
@@ -0,0 +1,24 @@
+using System;
+namespace TensorFlowDatasets
+{
+/// <summary>
+/// Base class for all datasets.
+/// </summary>
+public class DatasetBuilder
+{
+/// <summary>
+/// Downloads and prepares the dataset for reading.
+/// </summary>
+/// <param name="download_dir">
+/// Directory where downloaded files are stored.
+/// </param>
+/// <param name="download_config">
+/// Further configuration for downloading and preparing the dataset.
+/// </param>
+public void download_and_prepare(string download_dir = null, DownloadConfig download_config = null)
+{
+}
+}
+}
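`DatasetBuilder` and `DownloadConfig` (below) are still empty scaffolding, but the intended call shape is already visible. A hedged usage sketch (the directory is arbitrary):

```cs
using TensorFlowDatasets;

class DatasetExample
{
    static void Main()
    {
        // download_and_prepare is a no-op until the stub is implemented,
        // but this is the call shape the API is sketching out.
        var builder = new DatasetBuilder();
        builder.download_and_prepare(
            download_dir: "datasets/mnist",
            download_config: new DownloadConfig());
    }
}
```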
@@ -0,0 +1,10 @@
+using System;
+using System.Collections.Generic;
+using System.Text;
+namespace TensorFlowDatasets
+{
+public class DownloadConfig
+{
+}
+}
@@ -0,0 +1,19 @@
+<Project Sdk="Microsoft.NET.Sdk">
+<PropertyGroup>
+<TargetFramework>netcoreapp2.2</TargetFramework>
+<PackageId>SciSharp.TensorFlowDatasets</PackageId>
+<Version>0.0.1</Version>
+<Authors>SciSharp Team</Authors>
+<Product>TensorFlow Datasets</Product>
+<GeneratePackageOnBuild>true</GeneratePackageOnBuild>
+<PackageIconUrl>https://avatars3.githubusercontent.com/u/44989469?s=200&amp;v=4</PackageIconUrl>
+<PackageProjectUrl>http://scisharpstack.org</PackageProjectUrl>
+<Description>TensorFlow Datasets provides many public datasets as tf.data.Datasets.</Description>
+<RepositoryUrl>https://github.com/SciSharp/TensorFlow.NET</RepositoryUrl>
+<RepositoryType>git</RepositoryType>
+<PackageTags>SciSharp, Dataset, TensorFlow</PackageTags>
+<Copyright>Apache 2.0</Copyright>
+</PropertyGroup>
+</Project>
@@ -27,5 +27,54 @@ namespace Tensorflow.Hub
 labels.astype(dataType);
 Labels = labels;
 }
+public (NDArray, NDArray) GetNextBatch(int batch_size, bool fake_data = false, bool shuffle = true)
+{
+var start = IndexInEpoch;
+// Shuffle for the first epoch
+if (EpochsCompleted == 0 && start == 0 && shuffle)
+{
+var perm0 = np.arange(NumOfExamples);
+np.random.shuffle(perm0);
+Data = Data[perm0];
+Labels = Labels[perm0];
+}
+// Go to the next epoch
+if (start + batch_size > NumOfExamples)
+{
+// Finished epoch
+EpochsCompleted += 1;
+// Get the rest of the examples in this epoch
+var rest_num_examples = NumOfExamples - start;
+//var images_rest_part = _images[np.arange(start, _num_examples)];
+//var labels_rest_part = _labels[np.arange(start, _num_examples)];
+// Shuffle the data
+if (shuffle)
+{
+var perm = np.arange(NumOfExamples);
+np.random.shuffle(perm);
+Data = Data[perm];
+Labels = Labels[perm];
+}
+start = 0;
+IndexInEpoch = batch_size - rest_num_examples;
+var end = IndexInEpoch;
+var images_new_part = Data[np.arange(start, end)];
+var labels_new_part = Labels[np.arange(start, end)];
+/*return (np.concatenate(new float[][] { images_rest_part.Data<float>(), images_new_part.Data<float>() }, axis: 0),
+np.concatenate(new float[][] { labels_rest_part.Data<float>(), labels_new_part.Data<float>() }, axis: 0));*/
+return (images_new_part, labels_new_part);
+}
+else
+{
+IndexInEpoch += batch_size;
+var end = IndexInEpoch;
+return (Data[np.arange(start, end)], Labels[np.arange(start, end)]);
+}
+}
 }
 }
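A hedged sketch of how `GetNextBatch` is consumed in a training loop, assuming the `Datasets<T>` wrapper exposes the training split as `Train` (the property name is an assumption; check Tensorflow.Hub):

```cs
using System.Threading.Tasks;
using Tensorflow.Hub;

class BatchingExample
{
    static async Task Main()
    {
        var mnist = await MnistModelLoader.LoadAsync("mnist", oneHot: true);
        for (var step = 0; step < 1000; step++)
        {
            // Shuffles once at the start of each epoch and advances IndexInEpoch;
            // when fewer than batch_size examples remain it wraps to a new epoch.
            var (batchXs, batchYs) = mnist.Train.GetNextBatch(batch_size: 100);
            // feed batchXs / batchYs to the training op here
        }
    }
}
```

Note that the wrap-around path returns only the new epoch's examples; the commented-out `np.concatenate` shows the dropped tail of the previous epoch.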
@@ -15,14 +15,26 @@ namespace Tensorflow.Hub
 private const string TEST_IMAGES = "t10k-images-idx3-ubyte.gz";
 private const string TEST_LABELS = "t10k-labels-idx1-ubyte.gz";
-public static async Task<Datasets<MnistDataSet>> LoadAsync(string trainDir, bool oneHot = false)
+public static async Task<Datasets<MnistDataSet>> LoadAsync(string trainDir, bool oneHot = false, int? trainSize = null, int? validationSize = null, int? testSize = null)
 {
 var loader = new MnistModelLoader();
-return await loader.LoadAsync(new ModelLoadSetting
+var setting = new ModelLoadSetting
 {
 TrainDir = trainDir,
 OneHot = oneHot
-});
+};
+if (trainSize.HasValue)
+setting.TrainSize = trainSize.Value;
+if (validationSize.HasValue)
+setting.ValidationSize = validationSize.Value;
+if (testSize.HasValue)
+setting.TestSize = testSize.Value;
+return await loader.LoadAsync(setting);
 }
 public async Task<Datasets<MnistDataSet>> LoadAsync(ModelLoadSetting setting)
@@ -86,7 +98,7 @@ namespace Tensorflow.Hub
 var train = new MnistDataSet(trainImages, trainLabels, dtype, reshape);
 var validation = new MnistDataSet(validationImages, validationLabels, dtype, reshape);
-var test = new MnistDataSet(trainImages, trainLabels, dtype, reshape);
+var test = new MnistDataSet(testImages, testLabels, dtype, reshape);
 return new Datasets<MnistDataSet>(train, validation, test);
 }
@@ -25,13 +25,25 @@ namespace Tensorflow.Hub
 if (!Path.IsPathRooted(dirSaveTo))
 dirSaveTo = Path.Combine(AppContext.BaseDirectory, dirSaveTo);
-if (!Directory.Exists(dirSaveTo))
-Directory.CreateDirectory(dirSaveTo);
-using (var wc = new WebClient())
+var fileSaveTo = Path.Combine(dirSaveTo, fileName);
+if (File.Exists(fileSaveTo))
 {
-await wc.DownloadFileTaskAsync(url, Path.Combine(dirSaveTo, fileName));
+// TODO: maybe check the file's hash code and download info to verify the file is complete ...
+Console.WriteLine($"{fileSaveTo} already exists.");
 }
+else
+{
+if (!Directory.Exists(dirSaveTo))
+Directory.CreateDirectory(dirSaveTo);
+using (var wc = new WebClient())
+{
+await wc.DownloadFileTaskAsync(url, fileSaveTo);
+}
+}
 }
 public static async Task UnzipAsync<TDataSet>(this IModelLoader<TDataSet> modelLoader, string zipFile, string saveTo)
@@ -42,7 +54,7 @@ namespace Tensorflow.Hub
 if (!Directory.Exists(saveTo))
 Directory.CreateDirectory(saveTo);
 if (!Path.IsPathRooted(zipFile))
 zipFile = Path.Combine(AppContext.BaseDirectory, zipFile);
@@ -78,7 +90,7 @@ namespace Tensorflow.Hub
 var cts = new CancellationTokenSource();
 var showProgressTask = ShowProgressInConsole(cts);
 try
 {
 await task;
@@ -86,7 +98,7 @@ namespace Tensorflow.Hub
 finally
 {
 cts.Cancel();
-}
+}
 }
 private static async Task ShowProgressInConsole(CancellationTokenSource cts)
@@ -189,6 +189,9 @@ namespace Tensorflow
 public static Tensor log1p(Tensor x, string name = null)
 => gen_math_ops.log1p(x, name);
+public static Tensor logical_and(Tensor x, Tensor y, string name = null)
+=> gen_math_ops.logical_and(x, y, name);
 /// <summary>
 /// Clips tensor values to a specified min and max.
 /// </summary>
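A small usage sketch of the newly exposed `tf.logical_and` wrapper (the values are arbitrary, and boolean-array support in `tf.constant` is assumed):

```cs
using System;
using Tensorflow;

class LogicalAndExample
{
    static void Main()
    {
        var x = tf.constant(new[] { true, false, true });
        var y = tf.constant(new[] { true, true, false });
        var both = tf.logical_and(x, y); // element-wise AND via gen_math_ops

        using (var sess = tf.Session())
            Console.WriteLine(sess.run(both)); // [true, false, false]
    }
}
```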
@@ -136,7 +136,7 @@ namespace Tensorflow
 public static Tensor bias_add(Tensor value, RefVariable bias, string data_format = null, string name = null)
 {
-return Python.with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope =>
+return Python.tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope =>
 {
 name = scope;
 return gen_nn_ops.bias_add(value, bias, data_format: data_format, name: name);
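This and many later hunks are the same mechanical rename: the Python-style context helper `with` becomes `tf_with`, which avoids clashing with other `with` helpers in scope and makes the TensorFlow scoping intent explicit at call sites. A simplified sketch of the helper's shape (abbreviated; see `Tensorflow.Python` for the real signatures):

```cs
// Enter/exit a Python-style context object around a body, returning its result.
public static TOut tf_with<TIn, TOut>(TIn scope, Func<TIn, TOut> body)
    where TIn : IPython
{
    try
    {
        scope.__enter__();   // open the scope (e.g. push a name scope)
        return body(scope);  // run the caller's code inside it
    }
    finally
    {
        scope.__exit__();    // always unwind, even on exceptions
    }
}
```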
@@ -169,7 +169,7 @@ namespace Tensorflow
 /// <returns></returns>
 public static Tensor softmax_cross_entropy_with_logits(Tensor labels, Tensor logits, int dim = -1, string name = null)
 {
-with(ops.name_scope(name, "softmax_cross_entropy_with_logits_sg", new { logits, labels }), scope =>
+tf_with(ops.name_scope(name, "softmax_cross_entropy_with_logits_sg", new { logits, labels }), scope =>
 {
 name = scope;
 labels = array_ops.stop_gradient(labels, name: "labels_stop_gradient");
@@ -19,10 +19,8 @@ using System.Runtime.InteropServices;
 namespace Tensorflow
 {
-public class Buffer : IDisposable
+public class Buffer : DisposableObject
 {
-private IntPtr _handle;
 private TF_Buffer buffer => Marshal.PtrToStructure<TF_Buffer>(_handle);
 public byte[] Data
@@ -30,8 +28,8 @@ namespace Tensorflow
 get
 {
 var data = new byte[buffer.length];
-if (buffer.length > 0)
-Marshal.Copy(buffer.data, data, 0, (int)buffer.length);
+if (data.Length > 0)
+Marshal.Copy(buffer.data, data, 0, data.Length);
 return data;
 }
 }
@@ -54,6 +52,8 @@ namespace Tensorflow
 Marshal.Copy(data, 0, dst, data.Length);
 _handle = c_api.TF_NewBufferFromString(dst, (ulong)data.Length);
+Marshal.FreeHGlobal(dst);
 }
 public static implicit operator IntPtr(Buffer buffer)
@@ -66,9 +66,7 @@ namespace Tensorflow
 return buffer.Data;
 }
-public void Dispose()
-{
-c_api.TF_DeleteBuffer(_handle);
-}
+protected override void DisposeUnManagedState(IntPtr handle)
+=> c_api.TF_DeleteBuffer(handle);
 }
 }
@@ -68,7 +68,7 @@ namespace Tensorflow.Clustering
 private Tensor _initialize()
 {
-return with(ops.control_dependencies(new Operation[]
+return tf_with(ops.control_dependencies(new Operation[]
 {
 check_ops.assert_positive(_num_remaining)
 }), delegate
@@ -0,0 +1,77 @@
+/*****************************************************************************
+Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+******************************************************************************/
+using System;
+using System.Collections.Generic;
+using System.Text;
+namespace Tensorflow
+{
+/// <summary>
+/// Abstract base class for disposable objects allocated in the unmanaged runtime.
+/// </summary>
+public abstract class DisposableObject : IDisposable
+{
+protected IntPtr _handle;
+protected DisposableObject() { }
+public DisposableObject(IntPtr handle)
+{
+_handle = handle;
+}
+protected virtual void DisposeManagedState()
+{
+}
+protected abstract void DisposeUnManagedState(IntPtr handle);
+protected virtual void Dispose(bool disposing)
+{
+if (_handle != IntPtr.Zero)
+{
+// dispose managed state (managed objects) only when called from Dispose().
+if (disposing)
+DisposeManagedState();
+// always free the unmanaged handle, even when invoked from the finalizer.
+DisposeUnManagedState(_handle);
+_handle = IntPtr.Zero;
+}
+}
+// the finalizer is overridden because Dispose(bool disposing) above frees unmanaged resources.
+~DisposableObject()
+{
+// Do not change this code. Put cleanup code in Dispose(bool disposing) above.
+Dispose(false);
+}
+// This code added to correctly implement the disposable pattern.
+public void Dispose()
+{
+// Do not change this code. Put cleanup code in Dispose(bool disposing) above.
+Dispose(true);
+// suppress finalization since the handle has already been released.
+GC.SuppressFinalize(this);
+}
+}
+}
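`Buffer` above shows the intended specialization of this new base class: a subclass only supplies how to free its unmanaged handle. A minimal standalone sketch (the `NativeBlock` type is hypothetical):

```cs
using System;
using System.Runtime.InteropServices;
using Tensorflow;

// Hypothetical wrapper demonstrating the DisposableObject pattern:
// the base class tracks the handle; the subclass frees it exactly once,
// whether Dispose() is called or the finalizer runs.
public sealed class NativeBlock : DisposableObject
{
    public NativeBlock(int size) : base(Marshal.AllocHGlobal(size)) { }

    protected override void DisposeUnManagedState(IntPtr handle)
        => Marshal.FreeHGlobal(handle);
}

// usage: using (var block = new NativeBlock(1024)) { /* ... */ }
```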
@@ -128,7 +128,7 @@ namespace Tensorflow
 IntPtr c_op;
 while ((c_op = c_api.TF_GraphNextOperation(graph, ref pos)) != IntPtr.Zero)
 {
-yield return c_op;
+yield return new Operation(c_op, graph);
 }
 }
 }
@@ -42,7 +42,7 @@ namespace Tensorflow
 string prefix = "";
 var graph = ops.get_default_graph();
-with(ops.name_scope(name, "import", input_map.Values), scope =>
+tf_with(ops.name_scope(name, "import", input_map.Values), scope =>
 {
 prefix = scope;
 /*if (!string.IsNullOrEmpty(prefix))
@@ -55,7 +55,7 @@ namespace Tensorflow
 **/
 var grads = new Dictionary<string, List<List<Tensor>>>();
-with(ops.name_scope(name, "gradients",
+tf_with(ops.name_scope(name, "gradients",
 values: ys.Concat(xs).Concat(stop_gradients).Concat(grad_ys)), scope =>
 {
 string grad_scope = scope;
@@ -141,7 +141,7 @@ namespace Tensorflow
 }
 }
-with(ops.name_scope(op.name + "_grad"), scope1 =>
+tf_with(ops.name_scope(op.name + "_grad"), scope1 =>
 {
 string name1 = scope1;
 if (grad_fn != null)
@@ -90,7 +90,7 @@ namespace Tensorflow.Gradients
 {
 var grad = grads[0];
 var y = op.outputs[0]; // y = e^x
-return with(ops.control_dependencies(new Operation[] { grad }), dp => {
+return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => {
 y = math_ops.conj(y);
 return new Tensor[] { math_ops.mul_no_nan(y, grad) };
 });
@@ -107,7 +107,7 @@ namespace Tensorflow.Gradients
 {
 var grad = grads[0];
 var x = op.inputs[0];
-return with(ops.control_dependencies(new Operation[] { grad }), dp => {
+return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => {
 x = math_ops.conj(x);
 return new Tensor[] { grad * math_ops.digamma(x) };
 });
@@ -118,7 +118,7 @@ namespace Tensorflow.Gradients
 {
 var grad = grads[0];
 var x = op.inputs[0];
-return with(ops.control_dependencies(new Operation[] { grad }), dp => {
+return tf_with(ops.control_dependencies(new Operation[] { grad }), dp => {
 x = math_ops.conj(x);
 return new Tensor[] { grad * math_ops.reciprocal(x) };
 });
@@ -431,7 +431,7 @@ namespace Tensorflow.Gradients
 var grad = grads[0];
 var y = op.outputs[0];
-return with(ops.control_dependencies(grads), delegate
+return tf_with(ops.control_dependencies(grads), delegate
 {
 y = math_ops.conj(y);
 return new Tensor[] { gen_math_ops.sigmoid_grad(y, grad) };
@@ -453,7 +453,7 @@ namespace Tensorflow.Gradients
 var grad = grads[0];
 var x = op.inputs[0];
-return with(ops.control_dependencies(grads), delegate
+return tf_with(ops.control_dependencies(grads), delegate
 {
 x = math_ops.conj(x);
 var y = constant_op.constant(2.0f, dtype: x.dtype);
@@ -467,7 +467,7 @@ namespace Tensorflow.Gradients
 var grad = grads[0];
 var y = op.outputs[0];
-return with(ops.control_dependencies(grads), delegate
+return tf_with(ops.control_dependencies(grads), delegate
 {
 y = math_ops.conj(y);
 return new Tensor[] { gen_math_ops.tanh_grad(y, grad) };
@@ -22,7 +22,7 @@ namespace Tensorflow
 {
 var buffer = new Buffer();
 c_api.TF_GraphToGraphDef(_handle, buffer, s);
-s.Check();
+s.Check(true);
 // var def = GraphDef.Parser.ParseFrom(buffer);
 // buffer.Dispose();
@@ -31,8 +31,11 @@ namespace Tensorflow
 private GraphDef _as_graph_def(bool add_shapes = false)
 {
-var buffer = ToGraphDef(Status);
-Status.Check();
+var status = new Status();
+var buffer = ToGraphDef(status);
+status.Check(true);
+status.Dispose();
 var def = GraphDef.Parser.ParseFrom(buffer);
 buffer.Dispose();
@@ -43,16 +43,20 @@ namespace Tensorflow
 var bytes = File.ReadAllBytes(file_path);
 var graph_def = new Tensorflow.Buffer(bytes);
 var opts = c_api.TF_NewImportGraphDefOptions();
-c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, Status);
-return Status;
+var status = new Status();
+c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, status);
+return status;
 }
-public Status Import(byte[] bytes)
+public Status Import(byte[] bytes, string prefix = "")
 {
 var graph_def = new Tensorflow.Buffer(bytes);
 var opts = c_api.TF_NewImportGraphDefOptions();
-c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, Status);
-return Status;
+c_api.TF_ImportGraphDefOptionsSetPrefix(opts, prefix);
+var status = new Status();
+c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, status);
+c_api.TF_DeleteImportGraphDefOptions(opts);
+return status;
 }
 public static Graph ImportFromPB(string file_path, string name = null)
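A short sketch of the new `prefix` overload, assuming a serialized `GraphDef` on disk; a prefixed import keeps node names from colliding when loading several graphs into one `Graph`:

```cs
using System.IO;
using Tensorflow;

class ImportExample
{
    static void Main()
    {
        var graph = new Graph();
        var status = graph.Import(File.ReadAllBytes("model.pb"), prefix: "model");
        status.Check(true); // throws if the import status is not OK

        // Imported nodes are now addressable as "model/<original_name>".
    }
}
```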
@@ -38,6 +38,31 @@ namespace Tensorflow
 return c_api.TF_NewOperation(_handle, opType, opName);
 }
+public unsafe Operation[] ReturnOperations(IntPtr results)
+{
+TF_Operation return_oper_handle = new TF_Operation();
+int num_return_opers = 0;
+c_api.TF_ImportGraphDefResultsReturnOperations(results, ref num_return_opers, ref return_oper_handle);
+Operation[] return_opers = new Operation[num_return_opers];
+for (int i = 0; i < num_return_opers; i++)
+{
+var handle = return_oper_handle.node + Marshal.SizeOf<TF_Operation>() * i;
+return_opers[i] = new Operation(*(IntPtr*)handle);
+}
+return return_opers;
+}
+public Operation OperationByName(string operName)
+{
+return c_api.TF_GraphOperationByName(_handle, operName);
+}
+public ITensorOrOperation[] get_operations()
+{
+return _nodes_by_name.Values.Select(x => x).ToArray();
+}
 /// <summary>
 /// Returns the `Operation` with the given `name`.
 ///
@@ -15,6 +15,7 @@
 ******************************************************************************/
 using System;
+using System.Collections;
 using System.Collections.Generic;
 using System.Linq;
 using System.Runtime.InteropServices;
@@ -72,7 +73,7 @@ namespace Tensorflow
 all variables that are created during the construction of a graph. The caller
 may define additional collections by specifying a new name.
 */
-public partial class Graph : IPython, IDisposable
+public partial class Graph : IPython, IDisposable, IEnumerable<Operation>
 {
 private IntPtr _handle;
 private Dictionary<int, ITensorOrOperation> _nodes_by_id;
@@ -87,8 +88,7 @@ namespace Tensorflow
 private string _graph_key;
 public string graph_key => _graph_key;
 public string _last_loss_reduction;
-public bool _is_loss_scaled_by_optimizer { get; set; }
-public Status Status { get; }
+public bool _is_loss_scaled_by_optimizer { get; set; }
 /// <summary>
 /// True if the graph is considered "finalized". In that case no
@@ -106,7 +106,6 @@ namespace Tensorflow
 public Graph()
 {
 _handle = c_api.TF_NewGraph();
-Status = new Status();
 _nodes_by_id = new Dictionary<int, ITensorOrOperation>();
 _nodes_by_name = new Dictionary<string, ITensorOrOperation>();
 _names_in_use = new Dictionary<string, int>();
@@ -116,11 +115,14 @@ namespace Tensorflow
 public Graph(IntPtr handle)
 {
 _handle = handle;
-Status = new Status();
 _nodes_by_id = new Dictionary<int, ITensorOrOperation>();
 _nodes_by_name = new Dictionary<string, ITensorOrOperation>();
 _names_in_use = new Dictionary<string, int>();
 _graph_key = $"grap-key-{ops.uid()}/";
+}
+public void __enter__()
+{
 }
 public ITensorOrOperation as_graph_element(object obj, bool allow_tensor = true, bool allow_operation = true)
@@ -409,31 +411,6 @@ namespace Tensorflow
 return return_outputs;
 }
-public unsafe Operation[] ReturnOperations(IntPtr results)
-{
-TF_Operation return_oper_handle = new TF_Operation();
-int num_return_opers = 0;
-c_api.TF_ImportGraphDefResultsReturnOperations(results, ref num_return_opers, ref return_oper_handle);
-Operation[] return_opers = new Operation[num_return_opers];
-for (int i = 0; i < num_return_opers; i++)
-{
-var handle = return_oper_handle.node + Marshal.SizeOf<TF_Operation>() * i;
-return_opers[i] = new Operation(*(IntPtr*)handle);
-}
-return return_opers;
-}
-public Operation OperationByName(string operName)
-{
-return c_api.TF_GraphOperationByName(_handle, operName);
-}
-public ITensorOrOperation[] get_operations()
-{
-return _nodes_by_name.Values.Select(x => x).ToArray();
-}
 public string[] get_all_collection_keys()
 {
 return _collections.Keys.Where(x => !x.StartsWith("__")).ToArray();
@@ -468,7 +445,12 @@ namespace Tensorflow
 public void Dispose()
 {
-// c_api.TF_DeleteGraph(_handle);
+/*if (_handle != IntPtr.Zero)
+c_api.TF_DeleteGraph(_handle);
+_handle = IntPtr.Zero;
+GC.SuppressFinalize(this);*/
 }
 /// <summary>
@@ -481,17 +463,46 @@ namespace Tensorflow
 public Tensor get_tensor_by_name(string name)
 {
 return (Tensor)this.as_graph_element(name, allow_tensor: true, allow_operation: false);
 }
-public void __enter__()
-{
-}
+public TensorShape GetTensorShape(TF_Output output)
+{
+var status = new Status();
+var ndim = c_api.TF_GraphGetTensorNumDims(_handle, output, status);
+status.Check();
+if (ndim == -1)
+return new TensorShape();
+var dims = new long[ndim];
+c_api.TF_GraphGetTensorShape(_handle, output, dims, dims.Length, status);
+status.Check();
+return new TensorShape(dims.Select(x => (int)x).ToArray());
+}
+public override string ToString()
+{
+int len = 0;
+return c_api.TF_GraphDebugString(_handle, out len);
+}
 public void __exit__()
 {
-}
+}
+private IEnumerable<Operation> GetEnumerable()
+=> c_api_util.tf_operations(this);
+IEnumerator<Operation> IEnumerable<Operation>.GetEnumerator()
+=> GetEnumerable().GetEnumerator();
+// non-generic path delegates to the generic enumerator instead of throwing
+IEnumerator IEnumerable.GetEnumerator()
+=> GetEnumerable().GetEnumerator();
 public static implicit operator IntPtr(Graph graph)
 {
 return graph._handle;
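With `Graph` now implementing `IEnumerable<Operation>` (backed by `c_api_util.tf_operations`, which wraps `TF_GraphNextOperation`), a graph can be enumerated directly. A hedged sketch:

```cs
using System;
using Tensorflow;

class EnumerateOps
{
    static void Main()
    {
        var graph = new Graph().as_default();
        var a = tf.constant(1f, name: "a");
        var b = tf.constant(2f, name: "b");
        var sum = a + b;

        // Each iteration yields the next node as a managed Operation wrapper.
        foreach (Operation op in graph)
            Console.WriteLine(op.name);
    }
}
```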
@@ -43,6 +43,9 @@ namespace Tensorflow
 [DllImport(TensorFlowLibName)]
 public static extern void TF_DeleteImportGraphDefResults(IntPtr results);
+[DllImport(TensorFlowLibName)]
+public static extern string TF_GraphDebugString(IntPtr graph, out int len);
 [DllImport(TensorFlowLibName)]
 public static extern void TF_GraphGetOpDef(IntPtr graph, string op_name, IntPtr output_op_def, IntPtr status);
@@ -100,6 +103,7 @@ namespace Tensorflow
 /// <param name="status">TF_Status*</param>
 [DllImport(TensorFlowLibName)]
 public static extern void TF_GraphImportGraphDef(IntPtr graph, IntPtr graph_def, IntPtr options, IntPtr status);
 /// <summary>
 /// Iterate through the operations of a graph.
 /// </summary>
@@ -207,7 +207,7 @@ namespace Tensorflow.Keras.Layers
 public Tensor _assign_moving_average(RefVariable variable, Tensor value, Tensor momentum)
 {
-return Python.with(ops.name_scope(null, "AssignMovingAvg", new { variable, value, momentum }), scope =>
+return Python.tf_with(ops.name_scope(null, "AssignMovingAvg", new { variable, value, momentum }), scope =>
 {
 // var cm = ops.colocate_with(variable);
 var decay = ops.convert_to_tensor(1.0f - momentum, name: "decay");
@@ -125,7 +125,7 @@ namespace Tensorflow.Keras.Layers
 // Symbolic execution on symbolic tensors. We will attempt to build
 // the corresponding TF subgraph inside `backend.get_graph()`
 var graph = backend.get_graph().as_default();
-with(ops.name_scope(_name_scope()), delegate
+tf_with(ops.name_scope(_name_scope()), delegate
 {
 // Build layer if applicable (if the `build` method has been
 // overridden).
@@ -72,7 +72,7 @@ namespace Tensorflow.Layers
 }
 Tensor outputs = null;
-with(scope_context_manager, scope2 =>
+tf_with(scope_context_manager, scope2 =>
 {
 _current_scope = scope2;
 // Actually call layer
@@ -136,12 +136,12 @@ namespace Tensorflow.Layers
 _set_scope();
 var reuse = built || (_reuse != null && _reuse.Value);
-return with(tf.variable_scope(_scope,
+return tf_with(tf.variable_scope(_scope,
 reuse: reuse,
 auxiliary_name_scope: false), scope =>
 {
 _current_scope = scope;
-return with(ops.name_scope(_name_scope()), delegate
+return tf_with(ops.name_scope(_name_scope()), delegate
 {
 var variable = base.add_weight(name,
 shape,
@@ -183,7 +183,7 @@ namespace Tensorflow.Layers
 }
 else
 {
-with(tf.variable_scope(scope, default_name: _base_name), captured_scope =>
+tf_with(tf.variable_scope(scope, default_name: _base_name), captured_scope =>
 {
 // convert variable_scope to VariableScope
 _scope = captured_scope;
@@ -122,7 +122,7 @@ namespace Tensorflow.Operations
 _external_values[result.name] = result;
 }
-with(ops.control_dependencies(null), ctrl =>
+tf_with(ops.control_dependencies(null), ctrl =>
 {
 var results = control_flow_ops._SwitchRefOrTensor(result, _pred);
 result = results[_branch];
@@ -58,7 +58,7 @@ namespace Tensorflow
 private Tensor _call_log_prob (Tensor value, string name)
 {
-return with(ops.name_scope(name, "moments", new { value }), scope =>
+return tf_with(ops.name_scope(name, "moments", new { value }), scope =>
 {
 try
 {
@@ -50,9 +50,9 @@ namespace Tensorflow
 parameters.Add("validate_args", validate_args);
 parameters.Add("allow_nan_stats", allow_nan_stats);
-with(ops.name_scope(name, "", new { loc, scale }), scope =>
+tf_with(ops.name_scope(name, "", new { loc, scale }), scope =>
 {
-with(ops.control_dependencies(validate_args ? new Operation[] { scale.op} : new Operation[] { }), cd =>
+tf_with(ops.control_dependencies(validate_args ? new Operation[] { scale.op} : new Operation[] { }), cd =>
 {
 this._loc = array_ops.identity(loc, name);
 this._scale = array_ops.identity(scale, name);
| @@ -24,7 +24,7 @@ namespace Tensorflow | |||||
| public Tensor compute_weighted_loss(Tensor losses, Tensor weights = null, string scope = null, | public Tensor compute_weighted_loss(Tensor losses, Tensor weights = null, string scope = null, | ||||
| string loss_collection = ops.GraphKeys.LOSSES, string reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) | string loss_collection = ops.GraphKeys.LOSSES, string reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) | ||||
| { | { | ||||
| return with(ops.name_scope(scope, default_name: "weighted_loss", (losses, weights)), delegate | |||||
| return tf_with(ops.name_scope(scope, default_name: "weighted_loss", (losses, weights)), delegate | |||||
| { | { | ||||
| // Save the `reduction` argument for loss normalization when distributing | // Save the `reduction` argument for loss normalization when distributing | ||||
| // to multiple replicas. Used only for estimator + v1 optimizer flow. | // to multiple replicas. Used only for estimator + v1 optimizer flow. | ||||
| @@ -77,7 +77,7 @@ namespace Tensorflow | |||||
| public Tensor _num_present(Tensor losses, Tensor weights, bool per_batch = false) | public Tensor _num_present(Tensor losses, Tensor weights, bool per_batch = false) | ||||
| { | { | ||||
| return with(ops.name_scope(null, default_name: "num_present", (losses, weights)), name_scope => | |||||
| return tf_with(ops.name_scope(null, default_name: "num_present", (losses, weights)), name_scope => | |||||
| { | { | ||||
| string scope = name_scope; | string scope = name_scope; | ||||
| weights = math_ops.cast(weights, dtype: dtypes.float32); | weights = math_ops.cast(weights, dtype: dtypes.float32); | ||||
| @@ -104,7 +104,7 @@ namespace Tensorflow | |||||
| string loss_collection= ops.GraphKeys.LOSSES, | string loss_collection= ops.GraphKeys.LOSSES, | ||||
| string reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) | string reduction = Reduction.SUM_BY_NONZERO_WEIGHTS) | ||||
| { | { | ||||
| return with(ops.name_scope(scope, | |||||
| return tf_with(ops.name_scope(scope, | |||||
| "sparse_softmax_cross_entropy_loss", | "sparse_softmax_cross_entropy_loss", | ||||
| (logits, labels, weights)), | (logits, labels, weights)), | ||||
| name_scope => | name_scope => | ||||
| @@ -30,7 +30,7 @@ namespace Tensorflow.Operations | |||||
| string data_format = "NHWC", | string data_format = "NHWC", | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "MaxPool", value), scope => | |||||
| return tf_with(ops.name_scope(name, "MaxPool", value), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| value = ops.convert_to_tensor(value, name: "input"); | value = ops.convert_to_tensor(value, name: "input"); | ||||
| @@ -29,7 +29,7 @@ namespace Tensorflow.Operations | |||||
| TF_DataType dtype = TF_DataType.DtInvalid, | TF_DataType dtype = TF_DataType.DtInvalid, | ||||
| int? parallel_iterations = null, bool swap_memory = false, bool time_major = false) | int? parallel_iterations = null, bool swap_memory = false, bool time_major = false) | ||||
| { | { | ||||
| with(tf.variable_scope("rnn"), scope => | |||||
| tf_with(tf.variable_scope("rnn"), scope => | |||||
| { | { | ||||
| VariableScope varscope = scope; | VariableScope varscope = scope; | ||||
| var flat_input = nest.flatten(inputs_tensor); | var flat_input = nest.flatten(inputs_tensor); | ||||
| @@ -139,7 +139,7 @@ namespace Tensorflow.Operations | |||||
| var time = array_ops.constant(0, dtype: dtypes.int32, name: "time"); | var time = array_ops.constant(0, dtype: dtypes.int32, name: "time"); | ||||
| string base_name = null; | string base_name = null; | ||||
| with(ops.name_scope("dynamic_rnn"), scope => base_name = scope); | |||||
| tf_with(ops.name_scope("dynamic_rnn"), scope => base_name = scope); | |||||
| Func<string, TensorShape, TF_DataType, Tensor> _create_ta = (name, element_shape, dtype_) => | Func<string, TensorShape, TF_DataType, Tensor> _create_ta = (name, element_shape, dtype_) => | ||||
| { | { | ||||
| @@ -58,7 +58,7 @@ namespace Tensorflow | |||||
| var input_types = new List<TF_DataType>(); | var input_types = new List<TF_DataType>(); | ||||
| object values = null; | object values = null; | ||||
| return with(ops.name_scope(name), scope => | |||||
| return tf_with(ops.name_scope(name), scope => | |||||
| { | { | ||||
| var inferred_from = new Dictionary<string, object>(); | var inferred_from = new Dictionary<string, object>(); | ||||
| var base_types = new List<TF_DataType>(); | var base_types = new List<TF_DataType>(); | ||||
| @@ -23,7 +23,10 @@ namespace Tensorflow | |||||
| /// </summary> | /// </summary> | ||||
| public partial class Operation | public partial class Operation | ||||
| { | { | ||||
| public static implicit operator Operation(IntPtr handle) => new Operation(handle); | |||||
| // make sure the new op is in the same graph instance | |||||
| public static implicit operator Operation(IntPtr handle) | |||||
| => new Operation(handle); | |||||
| public static implicit operator IntPtr(Operation op) => op._handle; | public static implicit operator IntPtr(Operation op) => op._handle; | ||||
| public static implicit operator Tensor(Operation op) => op.output; | public static implicit operator Tensor(Operation op) => op.output; | ||||
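For reference, the three implicit conversions above let a native handle, the managed wrapper, and the op's primary output interchange freely. An illustrative (non-normative) sketch, where `someHandle` stands for any valid `TF_Operation*` obtained from the C API:

```cs
// Illustrative only: round-tripping between the native handle and wrappers.
Operation op = someHandle;   // IntPtr -> Operation: wraps the TF_Operation* handle
IntPtr raw = op;             // Operation -> IntPtr: exposes op._handle
Tensor output = op;          // Operation -> Tensor: the op's first output
```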
| @@ -26,7 +26,18 @@ namespace Tensorflow | |||||
| { | { | ||||
| public TF_Output Input(int index) => c_api.TF_OperationInput(new TF_Input(_handle, index)); | public TF_Output Input(int index) => c_api.TF_OperationInput(new TF_Input(_handle, index)); | ||||
| public TF_DataType InputType(int index) => c_api.TF_OperationInputType(new TF_Input(_handle, index)); | public TF_DataType InputType(int index) => c_api.TF_OperationInputType(new TF_Input(_handle, index)); | ||||
| public int InputListLength(string name) => c_api.TF_OperationInputListLength(_handle, name, status); | |||||
| public int InputListLength(string name) | |||||
| { | |||||
| int num = 0; | |||||
| using(var status = new Status()) | |||||
| { | |||||
| num = c_api.TF_OperationInputListLength(_handle, name, status); | |||||
| status.Check(true); | |||||
| } | |||||
| return num; | |||||
| } | |||||
| public int NumInputs => c_api.TF_OperationNumInputs(_handle); | public int NumInputs => c_api.TF_OperationNumInputs(_handle); | ||||
| private TF_DataType[] _input_types => _inputs._inputs.Select(x => x.dtype).ToArray(); | private TF_DataType[] _input_types => _inputs._inputs.Select(x => x.dtype).ToArray(); | ||||
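`InputListLength` above (and `OutputListLength` in the next hunk) now allocate a short-lived `Status` per native call instead of relying on a shared `status` field. A hypothetical helper showing the pattern these hunks repeat; `WithStatus` is not part of the diff, while `Status`, `Status.Check(bool)` and the `c_api` signatures are as used throughout it:

```cs
// Hypothetical convenience wrapper for the status-per-call pattern.
private static T WithStatus<T>(Func<Status, T> nativeCall)
{
    using (var status = new Status())
    {
        var result = nativeCall(status); // the c_api call writes errors into `status`
        status.Check(true);              // throw if the native call reported failure
        return result;
    }
}

// usage sketch:
// int n = WithStatus(s => c_api.TF_OperationInputListLength(_handle, name, s));
```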
| @@ -24,7 +24,18 @@ namespace Tensorflow | |||||
| { | { | ||||
| public int NumOutputs => c_api.TF_OperationNumOutputs(_handle); | public int NumOutputs => c_api.TF_OperationNumOutputs(_handle); | ||||
| public TF_DataType OutputType(int index) => c_api.TF_OperationOutputType(new TF_Output(_handle, index)); | public TF_DataType OutputType(int index) => c_api.TF_OperationOutputType(new TF_Output(_handle, index)); | ||||
| public int OutputListLength(string name) => c_api.TF_OperationOutputListLength(_handle, name, status); | |||||
| public int OutputListLength(string name) | |||||
| { | |||||
| int num = 0; | |||||
| using (var status = new Status()) | |||||
| { | |||||
| num = c_api.TF_OperationOutputListLength(_handle, name, status); | |||||
| status.Check(true); | |||||
| } | |||||
| return num; | |||||
| } | |||||
| private Tensor[] _outputs; | private Tensor[] _outputs; | ||||
| public Tensor[] outputs => _outputs; | public Tensor[] outputs => _outputs; | ||||
| @@ -35,6 +46,8 @@ namespace Tensorflow | |||||
| public int OutputNumConsumers(int index) => c_api.TF_OperationOutputNumConsumers(new TF_Output(_handle, index)); | public int OutputNumConsumers(int index) => c_api.TF_OperationOutputNumConsumers(new TF_Output(_handle, index)); | ||||
| public TF_Output this[int index] => _tf_output(index); | |||||
| public unsafe TF_Input[] OutputConsumers(int index, int max_consumers) | public unsafe TF_Input[] OutputConsumers(int index, int max_consumers) | ||||
| { | { | ||||
| int size = Marshal.SizeOf<TF_Input>(); | int size = Marshal.SizeOf<TF_Input>(); | ||||
| @@ -54,11 +54,10 @@ namespace Tensorflow | |||||
| public Operation op => this; | public Operation op => this; | ||||
| public TF_DataType dtype => TF_DataType.DtInvalid; | public TF_DataType dtype => TF_DataType.DtInvalid; | ||||
| private Status status = new Status(); | |||||
| public string name => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationName(_handle)); | public string name => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationName(_handle)); | ||||
| public string OpType => c_api.StringPiece(c_api.TF_OperationOpType(_handle)); | |||||
| public string Device => c_api.StringPiece(c_api.TF_OperationDevice(_handle)); | |||||
| public string OpType => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationOpType(_handle)); | |||||
| public string Device => _handle == IntPtr.Zero ? null : c_api.StringPiece(c_api.TF_OperationDevice(_handle)); | |||||
| private NodeDef _node_def; | private NodeDef _node_def; | ||||
| public NodeDef node_def | public NodeDef node_def | ||||
| @@ -96,10 +95,14 @@ namespace Tensorflow | |||||
| _operDesc = c_api.TF_NewOperation(g, opType, oper_name); | _operDesc = c_api.TF_NewOperation(g, opType, oper_name); | ||||
| c_api.TF_SetAttrType(_operDesc, "dtype", TF_DataType.TF_INT32); | c_api.TF_SetAttrType(_operDesc, "dtype", TF_DataType.TF_INT32); | ||||
| _handle = c_api.TF_FinishOperation(_operDesc, status); | |||||
| // Dict mapping op name to file and line information for op colocation | |||||
| // context managers. | |||||
| using (var status = new Status()) | |||||
| { | |||||
| _handle = c_api.TF_FinishOperation(_operDesc, status); | |||||
| status.Check(true); | |||||
| } | |||||
| // Dict mapping op name to file and line information for op colocation | |||||
| // context managers. | |||||
| _control_flow_context = graph._get_control_flow_context(); | _control_flow_context = graph._get_control_flow_context(); | ||||
| } | } | ||||
| @@ -220,6 +223,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| AttrValue x = null; | AttrValue x = null; | ||||
| using (var status = new Status()) | |||||
| using (var buf = new Buffer()) | using (var buf = new Buffer()) | ||||
| { | { | ||||
| c_api.TF_OperationGetAttrValueProto(_handle, name, buf, status); | c_api.TF_OperationGetAttrValueProto(_handle, name, buf, status); | ||||
| @@ -274,12 +278,15 @@ namespace Tensorflow | |||||
| var output = tensor._as_tf_output(); | var output = tensor._as_tf_output(); | ||||
| // Reset cached inputs. | // Reset cached inputs. | ||||
| _inputs = null; | |||||
| _inputs = null; | |||||
| // after the c_api call next time _inputs is accessed | // after the c_api call next time _inputs is accessed | ||||
| // the updated inputs are reloaded from the c_api | // the updated inputs are reloaded from the c_api | ||||
| c_api.UpdateEdge(_graph, output, input, status); | |||||
| //var updated_inputs = inputs; | |||||
| status.Check(); | |||||
| using (var status = new Status()) | |||||
| { | |||||
| c_api.UpdateEdge(_graph, output, input, status); | |||||
| //var updated_inputs = inputs; | |||||
| status.Check(); | |||||
| } | |||||
| } | } | ||||
| private void _assert_same_graph(Tensor tensor) | private void _assert_same_graph(Tensor tensor) | ||||
| @@ -82,7 +82,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| Tensor output = null; | Tensor output = null; | ||||
| var state_size = this.state_size; | var state_size = this.state_size; | ||||
| with(ops.name_scope($"{this.GetType().Name}ZeroState", values: new { batch_size }), delegate | |||||
| tf_with(ops.name_scope($"{this.GetType().Name}ZeroState", values: new { batch_size }), delegate | |||||
| { | { | ||||
| output = _zero_state_tensors(state_size, batch_size, dtype); | output = _zero_state_tensors(state_size, batch_size, dtype); | ||||
| }); | }); | ||||
| @@ -66,7 +66,7 @@ namespace Tensorflow.Operations | |||||
| _element_shape = new List<TensorShape> { }; | _element_shape = new List<TensorShape> { }; | ||||
| } | } | ||||
| with(ops.name_scope(name, "", new { handle, size, flow }), scope => | |||||
| tf_with(ops.name_scope(name, "", new { handle, size, flow }), scope => | |||||
| { | { | ||||
| if(handle != null) | if(handle != null) | ||||
| { | { | ||||
| @@ -43,7 +43,7 @@ namespace Tensorflow | |||||
| public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | public static Tensor zeros(Shape shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | ||||
| { | { | ||||
| dtype = dtype.as_base_dtype(); | dtype = dtype.as_base_dtype(); | ||||
| return with(ops.name_scope(name, "zeros", shape), scope => | |||||
| return tf_with(ops.name_scope(name, "zeros", shape), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| switch (dtype) | switch (dtype) | ||||
| @@ -67,7 +67,7 @@ namespace Tensorflow | |||||
| public static Tensor zeros(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | public static Tensor zeros(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | ||||
| { | { | ||||
| dtype = dtype.as_base_dtype(); | dtype = dtype.as_base_dtype(); | ||||
| return with(ops.name_scope(name, "zeros", shape), scope => | |||||
| return tf_with(ops.name_scope(name, "zeros", shape), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| switch (dtype) | switch (dtype) | ||||
| @@ -140,7 +140,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| var must_pack = false; | var must_pack = false; | ||||
| var converted_elems = new List<object>(); | var converted_elems = new List<object>(); | ||||
| return with(ops.name_scope(name), scope => | |||||
| return tf_with(ops.name_scope(name), scope => | |||||
| { | { | ||||
| foreach (var (i, elem) in enumerate(list_or_tuple)) | foreach (var (i, elem) in enumerate(list_or_tuple)) | ||||
| { | { | ||||
| @@ -189,7 +189,7 @@ namespace Tensorflow | |||||
| public static Tensor rank_internal(Tensor input, string name = null, bool optimize = true) | public static Tensor rank_internal(Tensor input, string name = null, bool optimize = true) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "Rank", new List<Tensor> { input }), scope => | |||||
| return tf_with(ops.name_scope(name, "Rank", new List<Tensor> { input }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| var input_tensor = ops.convert_to_tensor(input); | var input_tensor = ops.convert_to_tensor(input); | ||||
| @@ -217,7 +217,7 @@ namespace Tensorflow | |||||
| private static Tensor ones_like_impl<T>(T tensor, TF_DataType dtype, string name, bool optimize = true) | private static Tensor ones_like_impl<T>(T tensor, TF_DataType dtype, string name, bool optimize = true) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "ones_like", new { tensor }), scope => | |||||
| return tf_with(ops.name_scope(name, "ones_like", new { tensor }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| var tensor1 = ops.convert_to_tensor(tensor, name: "tensor"); | var tensor1 = ops.convert_to_tensor(tensor, name: "tensor"); | ||||
| @@ -233,7 +233,7 @@ namespace Tensorflow | |||||
| public static Tensor ones(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | public static Tensor ones(Tensor shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | ||||
| { | { | ||||
| dtype = dtype.as_base_dtype(); | dtype = dtype.as_base_dtype(); | ||||
| return with(ops.name_scope(name, "ones", new { shape }), scope => | |||||
| return tf_with(ops.name_scope(name, "ones", new { shape }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| var output = gen_array_ops.fill(shape, constant_op.constant(1.0f, dtype: dtype), name: name); | var output = gen_array_ops.fill(shape, constant_op.constant(1.0f, dtype: dtype), name: name); | ||||
| @@ -244,7 +244,7 @@ namespace Tensorflow | |||||
| public static Tensor ones(Tensor[] shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | public static Tensor ones(Tensor[] shape, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | ||||
| { | { | ||||
| dtype = dtype.as_base_dtype(); | dtype = dtype.as_base_dtype(); | ||||
| return with(ops.name_scope(name, "ones", new { shape }), scope => | |||||
| return tf_with(ops.name_scope(name, "ones", new { shape }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| var output = _constant_if_small(1, shape[0]); | var output = _constant_if_small(1, shape[0]); | ||||
| @@ -257,7 +257,7 @@ namespace Tensorflow | |||||
| public static Tensor ones(int[] dims, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | public static Tensor ones(int[] dims, TF_DataType dtype = TF_DataType.TF_FLOAT, string name = null) | ||||
| { | { | ||||
| dtype = dtype.as_base_dtype(); | dtype = dtype.as_base_dtype(); | ||||
| return with(ops.name_scope(name, "ones", new { dims }), scope => | |||||
| return tf_with(ops.name_scope(name, "ones", new { dims }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| var shape = ops.convert_to_tensor(dims, dtype: TF_DataType.TF_INT32); | var shape = ops.convert_to_tensor(dims, dtype: TF_DataType.TF_INT32); | ||||
| @@ -273,7 +273,7 @@ namespace Tensorflow | |||||
| int axis = -1, | int axis = -1, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "one_hot", new { indices, depth, dtype }), scope => | |||||
| return tf_with(ops.name_scope(name, "one_hot", new { indices, depth, dtype }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| var on_exists = false; | var on_exists = false; | ||||
| @@ -341,7 +341,7 @@ namespace Tensorflow | |||||
| private static Tensor shape_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) | private static Tensor shape_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "Shape", new { input }), scope => | |||||
| return tf_with(ops.name_scope(name, "Shape", new { input }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| @@ -362,7 +362,7 @@ namespace Tensorflow | |||||
| private static Tensor size_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) | private static Tensor size_internal(Tensor input, string name = null, bool optimize = true, TF_DataType out_type = TF_DataType.TF_INT32) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "Size", new { input }), scope => | |||||
| return tf_with(ops.name_scope(name, "Size", new { input }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| @@ -382,7 +382,7 @@ namespace Tensorflow | |||||
| public static Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true) | public static Tensor zeros_like(Tensor tensor, TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool optimize = true) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "zeros_like", new Tensor[] { tensor }), scope => | |||||
| return tf_with(ops.name_scope(name, "zeros_like", new Tensor[] { tensor }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| tensor = ops.convert_to_tensor(tensor, name: "tensor"); | tensor = ops.convert_to_tensor(tensor, name: "tensor"); | ||||
| @@ -516,7 +516,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| if(values.Length == 1) // Degenerate case of one tensor. | if(values.Length == 1) // Degenerate case of one tensor. | ||||
| { | { | ||||
| return with(ops.name_scope(name), scope => { | |||||
| return tf_with(ops.name_scope(name), scope => { | |||||
| var t = ops.convert_to_tensor(axis, name: "concat_dim", dtype: TF_DataType.TF_INT32); | var t = ops.convert_to_tensor(axis, name: "concat_dim", dtype: TF_DataType.TF_INT32); | ||||
| return identity(values[0], name: scope); | return identity(values[0], name: scope); | ||||
| }); | }); | ||||
| @@ -535,7 +535,7 @@ namespace Tensorflow | |||||
| public static Tensor transpose<T1, T2>(T1 a, T2 perm, string name = "transpose", bool conjugate = false) | public static Tensor transpose<T1, T2>(T1 a, T2 perm, string name = "transpose", bool conjugate = false) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "transpose", new { a }), scope => | |||||
| return tf_with(ops.name_scope(name, "transpose", new { a }), scope => | |||||
| { | { | ||||
| return gen_array_ops.transpose(a, perm, name: scope); | return gen_array_ops.transpose(a, perm, name: scope); | ||||
| }); | }); | ||||
| @@ -31,7 +31,7 @@ namespace Tensorflow | |||||
| if (message == null) | if (message == null) | ||||
| message = ""; | message = ""; | ||||
| return with(ops.name_scope(name, "assert_equal", new { t1, t2, data }), delegate | |||||
| return tf_with(ops.name_scope(name, "assert_equal", new { t1, t2, data }), delegate | |||||
| { | { | ||||
| var x = ops.convert_to_tensor(t1, name: "x"); | var x = ops.convert_to_tensor(t1, name: "x"); | ||||
| var y = ops.convert_to_tensor(t2, name: "y"); | var y = ops.convert_to_tensor(t2, name: "y"); | ||||
| @@ -62,7 +62,7 @@ namespace Tensorflow | |||||
| if (message == null) | if (message == null) | ||||
| message = ""; | message = ""; | ||||
| return with(ops.name_scope(name, "assert_positive", new { x, data }), delegate | |||||
| return tf_with(ops.name_scope(name, "assert_positive", new { x, data }), delegate | |||||
| { | { | ||||
| x = ops.convert_to_tensor(x, name: "x"); | x = ops.convert_to_tensor(x, name: "x"); | ||||
| if (data == null) | if (data == null) | ||||
| @@ -86,7 +86,7 @@ namespace Tensorflow | |||||
| if (message == null) | if (message == null) | ||||
| message = ""; | message = ""; | ||||
| return with(ops.name_scope(name, "assert_less", new { x, y, data }), delegate | |||||
| return tf_with(ops.name_scope(name, "assert_less", new { x, y, data }), delegate | |||||
| { | { | ||||
| x = ops.convert_to_tensor(x, name: "x"); | x = ops.convert_to_tensor(x, name: "x"); | ||||
| y = ops.convert_to_tensor(y, name: "y"); | y = ops.convert_to_tensor(y, name: "y"); | ||||
| @@ -34,7 +34,7 @@ namespace Tensorflow | |||||
| int expected_rank_diff = 0, | int expected_rank_diff = 0, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, default_name: "remove_squeezable_dimensions", (labels, predictions)), delegate | |||||
| return tf_with(ops.name_scope(name, default_name: "remove_squeezable_dimensions", (labels, predictions)), delegate | |||||
| { | { | ||||
| predictions = ops.convert_to_tensor(predictions); | predictions = ops.convert_to_tensor(predictions); | ||||
| labels = ops.convert_to_tensor(labels); | labels = ops.convert_to_tensor(labels); | ||||
| @@ -28,7 +28,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| public static Operation Assert(Tensor condition, object[] data, int? summarize = null, string name = null) | public static Operation Assert(Tensor condition, object[] data, int? summarize = null, string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "Assert", new { condition, data }), scope => | |||||
| return tf_with(ops.name_scope(name, "Assert", new { condition, data }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| var xs = ops.convert_n_to_tensor(data); | var xs = ops.convert_n_to_tensor(data); | ||||
| @@ -53,7 +53,7 @@ namespace Tensorflow | |||||
| public static Operation group<T>(T[] inputs, string name = null) where T : ITensorOrOperation | public static Operation group<T>(T[] inputs, string name = null) where T : ITensorOrOperation | ||||
| { | { | ||||
| return with(ops.name_scope(name, "group_deps", inputs), scope => | |||||
| return tf_with(ops.name_scope(name, "group_deps", inputs), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| @@ -91,7 +91,7 @@ namespace Tensorflow | |||||
| private static Operation _GroupControlDeps(string dev, Operation[] deps, string name = null) | private static Operation _GroupControlDeps(string dev, Operation[] deps, string name = null) | ||||
| { | { | ||||
| return with(ops.control_dependencies(deps), ctl => | |||||
| return tf_with(ops.control_dependencies(deps), ctl => | |||||
| { | { | ||||
| if (dev == null) | if (dev == null) | ||||
| { | { | ||||
| @@ -135,7 +135,7 @@ namespace Tensorflow | |||||
| public static Tensor[] tuple(Tensor[] tensors, string name = null, Operation[] control_inputs = null) | public static Tensor[] tuple(Tensor[] tensors, string name = null, Operation[] control_inputs = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "tuple", tensors), scope => | |||||
| return tf_with(ops.name_scope(name, "tuple", tensors), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| var gating_ops = tensors.Where(x => x != null).Select(x => x.op).ToList(); | var gating_ops = tensors.Where(x => x != null).Select(x => x.op).ToList(); | ||||
| @@ -189,13 +189,13 @@ namespace Tensorflow | |||||
| values.AddRange(dependencies); | values.AddRange(dependencies); | ||||
| values.Add(output_tensor); | values.Add(output_tensor); | ||||
| return with(ops.name_scope(name, "control_dependency", values), scope => | |||||
| return tf_with(ops.name_scope(name, "control_dependency", values), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| // TODO: missing original code | // TODO: missing original code | ||||
| //with ops.colocate_with(output_tensor): | //with ops.colocate_with(output_tensor): | ||||
| { | { | ||||
| return with(ops.control_dependencies(dependencies), ctl => | |||||
| return tf_with(ops.control_dependencies(dependencies), ctl => | |||||
| { | { | ||||
| output_tensor = ops.convert_to_tensor_or_composite(output_tensor); | output_tensor = ops.convert_to_tensor_or_composite(output_tensor); | ||||
| return _Identity(output_tensor, name: name); | return _Identity(output_tensor, name: name); | ||||
| @@ -306,7 +306,7 @@ namespace Tensorflow | |||||
| bool strict = false, | bool strict = false, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "cond", new { pred }), delegate | |||||
| return tf_with(ops.name_scope(name, "cond", new { pred }), delegate | |||||
| { | { | ||||
| // TODO: here a chunk of original code is missing | // TODO: here a chunk of original code is missing | ||||
| /* | /* | ||||
| @@ -398,7 +398,7 @@ namespace Tensorflow | |||||
| bool strict = false, | bool strict = false, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "cond", new { pred }), delegate | |||||
| return tf_with(ops.name_scope(name, "cond", new { pred }), delegate | |||||
| { | { | ||||
| // Add the Switch to the graph. | // Add the Switch to the graph. | ||||
| var switch_result = @switch(pred, pred); | var switch_result = @switch(pred, pred); | ||||
| @@ -467,7 +467,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| if (inputs.Any(x => x == null)) | if (inputs.Any(x => x == null)) | ||||
| throw new ValueError($"At least one of the merge inputs is null: {inputs}"); | throw new ValueError($"At least one of the merge inputs is null: {inputs}"); | ||||
| return with(ops.name_scope(name, "Merge", inputs), scope => | |||||
| return tf_with(ops.name_scope(name, "Merge", inputs), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| inputs = inputs.Select(inp => | inputs = inputs.Select(inp => | ||||
| @@ -489,7 +489,7 @@ namespace Tensorflow | |||||
| TF_DataType dtype = TF_DataType.DtInvalid, | TF_DataType dtype = TF_DataType.DtInvalid, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "Switch", new { data, pred }), scope => | |||||
| return tf_with(ops.name_scope(name, "Switch", new { data, pred }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| data = ops.internal_convert_to_tensor_or_indexed_slices(data, | data = ops.internal_convert_to_tensor_or_indexed_slices(data, | ||||
| @@ -35,7 +35,7 @@ namespace Tensorflow | |||||
| string name = null, | string name = null, | ||||
| string max_norm = null) | string max_norm = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope => | |||||
| return tf_with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| int np = 1; | int np = 1; | ||||
| @@ -58,7 +58,7 @@ namespace Tensorflow | |||||
| string name = null, | string name = null, | ||||
| string max_norm = null) | string max_norm = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope => | |||||
| return tf_with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| int np = @params.Length; | int np = @params.Length; | ||||
| @@ -28,7 +28,7 @@ namespace Tensorflow | |||||
| if (dtype == image.dtype) | if (dtype == image.dtype) | ||||
| return array_ops.identity(image, name: name); | return array_ops.identity(image, name: name); | ||||
| return with(ops.name_scope(name, "convert_image", image), scope => | |||||
| return tf_with(ops.name_scope(name, "convert_image", image), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| @@ -350,6 +350,13 @@ namespace Tensorflow | |||||
| return _op.outputs[0]; | return _op.outputs[0]; | ||||
| } | } | ||||
| public static Tensor logical_and(Tensor x, Tensor y, string name = null) | |||||
| { | |||||
| var _op = _op_def_lib._apply_op_helper("LogicalAnd", name, args: new { x, y }); | |||||
| return _op.outputs[0]; | |||||
| } | |||||
| public static Tensor squared_difference(Tensor x, Tensor y, string name = null) | public static Tensor squared_difference(Tensor x, Tensor y, string name = null) | ||||
| { | { | ||||
| var _op = _op_def_lib._apply_op_helper("SquaredDifference", name, args: new { x, y, name }); | var _op = _op_def_lib._apply_op_helper("SquaredDifference", name, args: new { x, y, name }); | ||||
| @@ -29,7 +29,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| public static Tensor abs(Tensor x, string name = null) | public static Tensor abs(Tensor x, string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "Abs", new { x }), scope => | |||||
| return tf_with(ops.name_scope(name, "Abs", new { x }), scope => | |||||
| { | { | ||||
| x = ops.convert_to_tensor(x, name: "x"); | x = ops.convert_to_tensor(x, name: "x"); | ||||
| if (x.dtype.is_complex()) | if (x.dtype.is_complex()) | ||||
| @@ -69,7 +69,7 @@ namespace Tensorflow | |||||
| if(base_type == x.dtype) | if(base_type == x.dtype) | ||||
| return x; | return x; | ||||
| return with(ops.name_scope(name, "Cast", new { x }), scope => | |||||
| return tf_with(ops.name_scope(name, "Cast", new { x }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| x = ops.convert_to_tensor(x, name: "x"); | x = ops.convert_to_tensor(x, name: "x"); | ||||
| @@ -82,7 +82,7 @@ namespace Tensorflow | |||||
| public static Tensor cumsum(Tensor x, int axis = 0, bool exclusive = false, bool reverse = false, string name = null) | public static Tensor cumsum(Tensor x, int axis = 0, bool exclusive = false, bool reverse = false, string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "Cumsum", new {x}), scope => | |||||
| return tf_with(ops.name_scope(name, "Cumsum", new {x}), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| x = ops.convert_to_tensor(x, name: "x"); | x = ops.convert_to_tensor(x, name: "x"); | ||||
| @@ -110,7 +110,7 @@ namespace Tensorflow | |||||
| /// <returns>`x / y` returns the quotient of x and y.</returns> | /// <returns>`x / y` returns the quotient of x and y.</returns> | ||||
| public static Tensor div(Tensor x, Tensor y, string name = null) | public static Tensor div(Tensor x, Tensor y, string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "div", (x, y)), name_scope => | |||||
| return tf_with(ops.name_scope(name, "div", (x, y)), name_scope => | |||||
| { | { | ||||
| name = name_scope; | name = name_scope; | ||||
| x = ops.convert_to_tensor(x, name: "x"); | x = ops.convert_to_tensor(x, name: "x"); | ||||
| @@ -146,7 +146,7 @@ namespace Tensorflow | |||||
| /// </remarks> | /// </remarks> | ||||
| public static Tensor div_no_nan(Tensor x, Tensor y, string name = null) | public static Tensor div_no_nan(Tensor x, Tensor y, string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "div_no_nan", (x, y)), name_scope => | |||||
| return tf_with(ops.name_scope(name, "div_no_nan", (x, y)), name_scope => | |||||
| { | { | ||||
| name = name_scope; | name = name_scope; | ||||
| x = ops.convert_to_tensor(x, name: "x"); | x = ops.convert_to_tensor(x, name: "x"); | ||||
| @@ -229,7 +229,7 @@ namespace Tensorflow | |||||
| public static Tensor sign(Tensor x, string name = null) | public static Tensor sign(Tensor x, string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "Sign", new {x}), scope => | |||||
| return tf_with(ops.name_scope(name, "Sign", new {x}), scope => | |||||
| { | { | ||||
| x = ops.convert_to_tensor(x, name: "x"); | x = ops.convert_to_tensor(x, name: "x"); | ||||
| return gen_math_ops.sign(x); | return gen_math_ops.sign(x); | ||||
| @@ -337,7 +337,7 @@ namespace Tensorflow | |||||
| /// <returns> The reduced tensor.</returns> | /// <returns> The reduced tensor.</returns> | ||||
| public static Tensor reduce_logsumexp(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null) | public static Tensor reduce_logsumexp(Tensor input_tensor, int[] axis = null, bool keepdims = false, string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "ReduceLogSumExp", new { input_tensor }), scope => | |||||
| return tf_with(ops.name_scope(name, "ReduceLogSumExp", new { input_tensor }), scope => | |||||
| { | { | ||||
| var raw_max = reduce_max(input_tensor, axis, true); | var raw_max = reduce_max(input_tensor, axis, true); | ||||
| var my_max = array_ops.stop_gradient(array_ops.where(gen_math_ops.is_finite(raw_max), raw_max, array_ops.zeros_like(raw_max))); | var my_max = array_ops.stop_gradient(array_ops.where(gen_math_ops.is_finite(raw_max), raw_max, array_ops.zeros_like(raw_max))); | ||||
| @@ -497,7 +497,7 @@ namespace Tensorflow | |||||
| if (delta == null) | if (delta == null) | ||||
| delta = 1; | delta = 1; | ||||
| return with(ops.name_scope(name, "Range", new { start, limit, delta }), scope => | |||||
| return tf_with(ops.name_scope(name, "Range", new { start, limit, delta }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| var start1 = ops.convert_to_tensor(start, name: "start"); | var start1 = ops.convert_to_tensor(start, name: "start"); | ||||
| @@ -510,7 +510,7 @@ namespace Tensorflow | |||||
| public static Tensor floordiv(Tensor x, Tensor y, string name = null) | public static Tensor floordiv(Tensor x, Tensor y, string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "floordiv", new { x, y }), scope => | |||||
| return tf_with(ops.name_scope(name, "floordiv", new { x, y }), scope => | |||||
| { | { | ||||
| return gen_math_ops.floor_div(x, y, scope); | return gen_math_ops.floor_div(x, y, scope); | ||||
| }); | }); | ||||
| @@ -527,7 +527,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| Tensor result = null; | Tensor result = null; | ||||
| with(ops.name_scope(name, "MatMul", new Tensor[] { a, b }), scope => | |||||
| tf_with(ops.name_scope(name, "MatMul", new Tensor[] { a, b }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| @@ -551,7 +551,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| Tensor result = null; | Tensor result = null; | ||||
| with(ops.name_scope(name, "MatMul", new Tensor[] { x, y }), scope => | |||||
| tf_with(ops.name_scope(name, "MatMul", new Tensor[] { x, y }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| @@ -576,7 +576,7 @@ namespace Tensorflow | |||||
| if (dt.is_floating() || dt.is_integer()) | if (dt.is_floating() || dt.is_integer()) | ||||
| return x; | return x; | ||||
| return with(ops.name_scope(name, "Conj", new List<Tensor> { x }), scope => | |||||
| return tf_with(ops.name_scope(name, "Conj", new List<Tensor> { x }), scope => | |||||
| { | { | ||||
| return x; | return x; | ||||
| @@ -591,7 +591,7 @@ namespace Tensorflow | |||||
| public static Tensor _truediv_python3(Tensor x, Tensor y, string name = null) | public static Tensor _truediv_python3(Tensor x, Tensor y, string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "truediv", new { x, y }), scope => | |||||
| return tf_with(ops.name_scope(name, "truediv", new { x, y }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| var x_dtype = x.dtype.as_base_dtype(); | var x_dtype = x.dtype.as_base_dtype(); | ||||
| @@ -34,7 +34,7 @@ namespace Tensorflow | |||||
| float epsilon = 1e-12f, | float epsilon = 1e-12f, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "l2_normalize", new { x }), scope => | |||||
| return tf_with(ops.name_scope(name, "l2_normalize", new { x }), scope => | |||||
| { | { | ||||
| x = ops.convert_to_tensor(x, name: "x"); | x = ops.convert_to_tensor(x, name: "x"); | ||||
| var sq = math_ops.square(x); | var sq = math_ops.square(x); | ||||
| @@ -57,7 +57,7 @@ namespace Tensorflow | |||||
| string name = null, | string name = null, | ||||
| bool keep_dims = false) | bool keep_dims = false) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "moments", new { x, axes }), scope => | |||||
| return tf_with(ops.name_scope(name, "moments", new { x, axes }), scope => | |||||
| { | { | ||||
| // The dynamic range of fp16 is too limited to support the collection of | // The dynamic range of fp16 is too limited to support the collection of | ||||
| // sufficient statistics. As a workaround we simply perform the operations | // sufficient statistics. As a workaround we simply perform the operations | ||||
| @@ -123,7 +123,7 @@ namespace Tensorflow | |||||
| /// <returns>number of nonzero values with type dtype</returns> | /// <returns>number of nonzero values with type dtype</returns> | ||||
| private static Tensor _count_nonzero(Tensor input_tensor, TF_DataType dtype = TF_DataType.TF_INT64) | private static Tensor _count_nonzero(Tensor input_tensor, TF_DataType dtype = TF_DataType.TF_INT64) | ||||
| { | { | ||||
| return with(ops.name_scope("count_nonzero", "count_nonzero", new { input_tensor }), scope => | |||||
| return tf_with(ops.name_scope("count_nonzero", "count_nonzero", new { input_tensor }), scope => | |||||
| { | { | ||||
| var zero = array_ops.zeros(new NumSharp.Shape(), dtype: input_tensor.dtype); | var zero = array_ops.zeros(new NumSharp.Shape(), dtype: input_tensor.dtype); | ||||
| var nonzero_count = math_ops.reduce_sum( | var nonzero_count = math_ops.reduce_sum( | ||||
| @@ -140,7 +140,7 @@ namespace Tensorflow | |||||
| /// <returns>The fraction of zeros in value, with type float32.</returns> | /// <returns>The fraction of zeros in value, with type float32.</returns> | ||||
| public static Tensor zero_fraction(Tensor value, string name = null) | public static Tensor zero_fraction(Tensor value, string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "zero_fraction", new { value }), scope => | |||||
| return tf_with(ops.name_scope(name, "zero_fraction", new { value }), scope => | |||||
| { | { | ||||
| value = ops.convert_to_tensor(value, name: "value"); | value = ops.convert_to_tensor(value, name: "value"); | ||||
| Tensor size = array_ops.size(value, out_type: dtypes.int64); | Tensor size = array_ops.size(value, out_type: dtypes.int64); | ||||
| @@ -153,7 +153,7 @@ namespace Tensorflow | |||||
| () => _count_nonzero(value, dtype: dtypes.int64) | () => _count_nonzero(value, dtype: dtypes.int64) | ||||
| ); | ); | ||||
| with(ops.name_scope("counts_to_fraction"), count_scope => | |||||
| tf_with(ops.name_scope("counts_to_fraction"), count_scope => | |||||
| { | { | ||||
| var num_zero = math_ops.subtract(math_ops.cast(size, TF_DataType.TF_INT64), num_nonzero); | var num_zero = math_ops.subtract(math_ops.cast(size, TF_DataType.TF_INT64), num_nonzero); | ||||
| var num_zero_float32 = math_ops.cast(num_zero, dtype: dtypes.float32); | var num_zero_float32 = math_ops.cast(num_zero, dtype: dtypes.float32); | ||||
| @@ -50,7 +50,7 @@ namespace Tensorflow | |||||
| string data_format = null, | string data_format = null, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| return Python.with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope => | |||||
| return Python.tf_with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| value = ops.convert_to_tensor(value, name: "input"); | value = ops.convert_to_tensor(value, name: "input"); | ||||
| @@ -70,7 +70,7 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor dropout_v2(Tensor x, Tensor rate, Tensor noise_shape = null, int? seed = null, string name = null) | public static Tensor dropout_v2(Tensor x, Tensor rate, Tensor noise_shape = null, int? seed = null, string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "dropout", x), scope => | |||||
| return tf_with(ops.name_scope(name, "dropout", x), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| x = ops.convert_to_tensor(x, name: "x"); | x = ops.convert_to_tensor(x, name: "x"); | ||||
| @@ -134,7 +134,7 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public static Tensor max_pool(Tensor value, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string name = null) | public static Tensor max_pool(Tensor value, int[] ksize, int[] strides, string padding, string data_format = "NHWC", string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "MaxPool", value), scope => | |||||
| return tf_with(ops.name_scope(name, "MaxPool", value), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| value = ops.convert_to_tensor(value, name: "input"); | value = ops.convert_to_tensor(value, name: "input"); | ||||
| @@ -171,7 +171,7 @@ namespace Tensorflow | |||||
| Tensor logits = null, string name = null) | Tensor logits = null, string name = null) | ||||
| { | { | ||||
| // Reshape logits and labels to rank 2. | // Reshape logits and labels to rank 2. | ||||
| return with(ops.name_scope(name, default_name: "SparseSoftmaxCrossEntropyWithLogits", (labels, logits)), delegate | |||||
| return tf_with(ops.name_scope(name, default_name: "SparseSoftmaxCrossEntropyWithLogits", (labels, logits)), delegate | |||||
| { | { | ||||
| labels = ops.convert_to_tensor(labels); | labels = ops.convert_to_tensor(labels); | ||||
| logits = ops.convert_to_tensor(logits); | logits = ops.convert_to_tensor(logits); | ||||
| @@ -206,7 +206,7 @@ namespace Tensorflow | |||||
| int axis = -1, | int axis = -1, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "softmax_cross_entropy_with_logits", new { logits, labels }), scope => | |||||
| return tf_with(ops.name_scope(name, "softmax_cross_entropy_with_logits", new { logits, labels }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| var precise_logits = logits; | var precise_logits = logits; | ||||
| @@ -37,7 +37,7 @@ namespace Tensorflow | |||||
| int? seed = null, | int? seed = null, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "random_normal", new { shape, mean, stddev }), scope => | |||||
| return tf_with(ops.name_scope(name, "random_normal", new { shape, mean, stddev }), scope => | |||||
| { | { | ||||
| var shape_tensor = _ShapeTensor(shape); | var shape_tensor = _ShapeTensor(shape); | ||||
| var mean_tensor = ops.convert_to_tensor(mean, dtype: dtype, name: "mean"); | var mean_tensor = ops.convert_to_tensor(mean, dtype: dtype, name: "mean"); | ||||
| @@ -67,7 +67,7 @@ namespace Tensorflow | |||||
| int? seed = null, | int? seed = null, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope => | |||||
| return tf_with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| var tensorShape = _ShapeTensor(shape); | var tensorShape = _ShapeTensor(shape); | ||||
| @@ -85,7 +85,7 @@ namespace Tensorflow | |||||
| int? seed = null, | int? seed = null, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope => | |||||
| return tf_with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| var minTensor = ops.convert_to_tensor(minval, dtype: dtype, name: "min"); | var minTensor = ops.convert_to_tensor(minval, dtype: dtype, name: "min"); | ||||
| @@ -110,7 +110,7 @@ namespace Tensorflow | |||||
| int? seed = null, | int? seed = null, | ||||
| string name = null) | string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "truncated_normal", new { shape, mean, stddev }), scope => | |||||
| return tf_with(ops.name_scope(name, "truncated_normal", new { shape, mean, stddev }), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| var shape_tensor = _ShapeTensor(shape); | var shape_tensor = _ShapeTensor(shape); | ||||
| @@ -22,7 +22,7 @@ namespace Tensorflow | |||||
| { | { | ||||
| public static Tensor broadcast_weights(Tensor weights, Tensor values) | public static Tensor broadcast_weights(Tensor weights, Tensor values) | ||||
| { | { | ||||
| return with(ops.name_scope(null, "broadcast_weights", (weights, values)), scope => | |||||
| return tf_with(ops.name_scope(null, "broadcast_weights", (weights, values)), scope => | |||||
| { | { | ||||
| values = ops.convert_to_tensor(values, name: "values"); | values = ops.convert_to_tensor(values, name: "values"); | ||||
| weights = ops.convert_to_tensor( | weights = ops.convert_to_tensor( | ||||
| @@ -75,7 +75,7 @@ namespace Tensorflow | |||||
| } | } | ||||
| [DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception | [DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception | ||||
| public static void with(IPython py, Action<IPython> action) | |||||
| public static void tf_with(IPython py, Action<IPython> action) | |||||
| { | { | ||||
| try | try | ||||
| { | { | ||||
| @@ -95,7 +95,7 @@ namespace Tensorflow | |||||
| } | } | ||||
| [DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception | [DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception | ||||
| public static void with<T>(T py, Action<T> action) where T : IPython | |||||
| public static void tf_with<T>(T py, Action<T> action) where T : IPython | |||||
| { | { | ||||
| try | try | ||||
| { | { | ||||
| @@ -115,7 +115,7 @@ namespace Tensorflow | |||||
| } | } | ||||
| [DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception | [DebuggerNonUserCode()] // with "Just My Code" enabled this lets the debugger break at the origin of the exception | ||||
| public static TOut with<TIn, TOut>(TIn py, Func<TIn, TOut> action) where TIn : IPython | |||||
| public static TOut tf_with<TIn, TOut>(TIn py, Func<TIn, TOut> action) where TIn : IPython | |||||
| { | { | ||||
| try | try | ||||
| { | { | ||||
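After the rename, `tf_with` keeps its three shapes: two `void` overloads and a value-returning overload (`Func<TIn, TOut>`), each entering the `IPython` context and disposing it on the way out. A value-returning sketch; `array_ops.ones(int[] ...)` is the overload visible earlier in this diff:

```cs
// Value-returning use of tf_with; the "ones_demo" scope name is illustrative.
var ones = tf_with(ops.name_scope("ones_demo"), scope =>
{
    string name = scope;                          // NameScope -> string
    return array_ops.ones(new int[] { 2, 2 }, name: name);
});
```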
| @@ -24,7 +24,7 @@ using System.Text; | |||||
| namespace Tensorflow | namespace Tensorflow | ||||
| { | { | ||||
| public class BaseSession | |||||
| public class BaseSession : DisposableObject | |||||
| { | { | ||||
| protected Graph _graph; | protected Graph _graph; | ||||
| protected bool _opened; | protected bool _opened; | ||||
| @@ -32,28 +32,23 @@ namespace Tensorflow | |||||
| protected int _current_version; | protected int _current_version; | ||||
| protected byte[] _target; | protected byte[] _target; | ||||
| protected IntPtr _session; | protected IntPtr _session; | ||||
| public Status Status; | |||||
| public Graph graph => _graph; | public Graph graph => _graph; | ||||
| public BaseSession(string target = "", Graph g = null, SessionOptions opts = null) | public BaseSession(string target = "", Graph g = null, SessionOptions opts = null) | ||||
| { | { | ||||
| _graph = g is null ? ops.get_default_graph() : g; | _graph = g is null ? ops.get_default_graph() : g; | ||||
| _graph.as_default(); | |||||
| _target = UTF8Encoding.UTF8.GetBytes(target); | _target = UTF8Encoding.UTF8.GetBytes(target); | ||||
| SessionOptions newOpts = null; | SessionOptions newOpts = null; | ||||
| if (opts == null) | if (opts == null) | ||||
| newOpts = c_api.TF_NewSessionOptions(); | |||||
| Status = new Status(); | |||||
| newOpts = new SessionOptions(); | |||||
| _session = c_api.TF_NewSession(_graph, opts ?? newOpts, Status); | |||||
| var status = new Status(); | |||||
| _session = c_api.TF_NewSession(_graph, opts ?? newOpts, status); | |||||
| // dispose newOpts once the session has been created | |||||
| if (opts == null) | |||||
| c_api.TF_DeleteSessionOptions(newOpts); | |||||
| Status.Check(true); | |||||
| status.Check(true); | |||||
| } | } | ||||
| public virtual NDArray run(object fetches, params FeedItem[] feed_dict) | public virtual NDArray run(object fetches, params FeedItem[] feed_dict) | ||||
| @@ -325,5 +320,19 @@ namespace Tensorflow | |||||
| { | { | ||||
| } | } | ||||
| public void close() | |||||
| { | |||||
| Dispose(); | |||||
| } | |||||
| protected override void DisposeUnManagedState(IntPtr handle) | |||||
| { | |||||
| using (var status = new Status()) | |||||
| { | |||||
| c_api.TF_DeleteSession(handle, status); | |||||
| status.Check(true); | |||||
| } | |||||
| } | |||||
| } | } | ||||
| } | } | ||||
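`BaseSession` now derives from `DisposableObject`: the shared `Status` field is gone, `close()` forwards to `Dispose()`, and the native session is torn down in `DisposeUnManagedState` under its own checked `Status`. In practice this makes sessions safely scopeable; a sketch, assuming the usual `tf.Session()` entry point:

```cs
// Session lifetime via using; Dispose eventually reaches DisposeUnManagedState,
// which calls TF_DeleteSession and checks the Status.
using (var sess = tf.Session())
{
    // run fetches as in the README examples, e.g.:
    // var result = sess.run(fetch);   // `fetch` stands for any fetchable tensor
}
```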
| @@ -15,6 +15,7 @@ | |||||
| ******************************************************************************/ | ******************************************************************************/ | ||||
| using System; | using System; | ||||
| using System.Runtime.InteropServices; | |||||
| namespace Tensorflow | namespace Tensorflow | ||||
| { | { | ||||
| @@ -26,8 +27,8 @@ namespace Tensorflow | |||||
| } | } | ||||
| public Session(IntPtr handle) | |||||
| : base("", null, null) | |||||
| public Session(IntPtr handle, Graph g = null) | |||||
| : base("", g, null) | |||||
| { | { | ||||
| _session = handle; | _session = handle; | ||||
| } | } | ||||
| @@ -36,7 +37,7 @@ namespace Tensorflow | |||||
| : base("", g, opts) | : base("", g, opts) | ||||
| { | { | ||||
| if (s == null) | if (s == null) | ||||
| s = Status; | |||||
| s = new Status(); | |||||
| } | } | ||||
| public Session as_default() | public Session as_default() | ||||
| @@ -49,35 +50,32 @@ namespace Tensorflow | |||||
| { | { | ||||
| var graph = c_api.TF_NewGraph(); | var graph = c_api.TF_NewGraph(); | ||||
| var status = new Status(); | var status = new Status(); | ||||
| var opt = c_api.TF_NewSessionOptions(); | |||||
| var opt = new SessionOptions(); | |||||
| var tags = new string[] { "serve" }; | |||||
| var buffer = new TF_Buffer(); | var buffer = new TF_Buffer(); | ||||
| var sess = c_api.TF_LoadSessionFromSavedModel(opt, IntPtr.Zero, path, new string[0], 0, graph, ref buffer, status); | |||||
| //var bytes = new Buffer(buffer.data).Data; | |||||
| //var meta_graph = MetaGraphDef.Parser.ParseFrom(bytes); | |||||
| status.Check(); | |||||
| new Graph(graph).as_default(); | |||||
| return sess; | |||||
| var sess = c_api.TF_LoadSessionFromSavedModel(opt, | |||||
| IntPtr.Zero, | |||||
| path, | |||||
| tags, | |||||
| tags.Length, | |||||
| graph, | |||||
| ref buffer, | |||||
| status); | |||||
| // load graph bytes | |||||
| // var data = new byte[buffer.length]; | |||||
| // Marshal.Copy(buffer.data, data, 0, (int)buffer.length); | |||||
| // var meta_graph = MetaGraphDef.Parser.ParseFrom(data); | |||||
| status.Check(true); | |||||
| return new Session(sess, g: new Graph(graph).as_default()); | |||||
| } | } | ||||
| public static implicit operator IntPtr(Session session) => session._session; | public static implicit operator IntPtr(Session session) => session._session; | ||||
| public static implicit operator Session(IntPtr handle) => new Session(handle); | public static implicit operator Session(IntPtr handle) => new Session(handle); | ||||
| public void close() | |||||
| { | |||||
| Dispose(); | |||||
| } | |||||
| public void Dispose() | |||||
| { | |||||
| c_api.TF_DeleteSession(_session, Status); | |||||
| Status.Dispose(); | |||||
| } | |||||
| public void __enter__() | public void __enter__() | ||||
| { | { | ||||
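The rewritten loader above now passes the standard `"serve"` tag, checks the status with `Check(true)`, and returns a managed `Session` bound to the freshly loaded graph (made default) instead of a raw handle. A hedged usage sketch, assuming the enclosing static method is TF.NET's `Session.LoadFromSavedModel(string path)`; the path and op name are illustrative:

```cs
// Load a SavedModel exported with the "serve" tag and work against its graph.
using (var sess = Session.LoadFromSavedModel("saved_model_dir"))
{
    var graph = sess.graph;   // populated from the SavedModel, set as default
    // look up model-specific ops by name before calling sess.run(...), e.g.:
    // var input = graph.OperationByName("serving_default_input");
}
```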
| @@ -20,37 +20,34 @@ using System.Runtime.InteropServices; | |||||
| namespace Tensorflow | namespace Tensorflow | ||||
| { | { | ||||
| public class SessionOptions : IDisposable | |||||
| public class SessionOptions : DisposableObject | |||||
| { | { | ||||
| private IntPtr _handle; | |||||
| private Status _status; | |||||
| public unsafe SessionOptions() | |||||
| public SessionOptions() | |||||
| { | { | ||||
| var opts = c_api.TF_NewSessionOptions(); | |||||
| _handle = opts; | |||||
| _status = new Status(); | |||||
| _handle = c_api.TF_NewSessionOptions(); | |||||
| } | } | ||||
| public unsafe SessionOptions(IntPtr handle) | |||||
| public SessionOptions(IntPtr handle) | |||||
| { | { | ||||
| _handle = handle; | _handle = handle; | ||||
| } | } | ||||
| public void Dispose() | |||||
| { | |||||
| c_api.TF_DeleteSessionOptions(_handle); | |||||
| _status.Dispose(); | |||||
| } | |||||
| protected override void DisposeUnManagedState(IntPtr handle) | |||||
| => c_api.TF_DeleteSessionOptions(handle); | |||||
| public Status SetConfig(ConfigProto config) | |||||
| public void SetConfig(ConfigProto config) | |||||
| { | { | ||||
| var bytes = config.ToByteArray(); | var bytes = config.ToByteArray(); | ||||
| var proto = Marshal.AllocHGlobal(bytes.Length); | var proto = Marshal.AllocHGlobal(bytes.Length); | ||||
| Marshal.Copy(bytes, 0, proto, bytes.Length); | Marshal.Copy(bytes, 0, proto, bytes.Length); | ||||
| c_api.TF_SetConfig(_handle, proto, (ulong)bytes.Length, _status); | |||||
| _status.Check(false); | |||||
| return _status; | |||||
| using (var status = new Status()) | |||||
| { | |||||
| c_api.TF_SetConfig(_handle, proto, (ulong)bytes.Length, status); | |||||
| status.Check(false); | |||||
| } | |||||
| Marshal.FreeHGlobal(proto); | |||||
| } | } | ||||
| public static implicit operator IntPtr(SessionOptions opts) => opts._handle; | public static implicit operator IntPtr(SessionOptions opts) => opts._handle; | ||||
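`SessionOptions` likewise moves onto `DisposableObject`; `SetConfig` is now `void`, serializes the `ConfigProto`, hands the bytes to `TF_SetConfig` under a short-lived checked `Status`, and frees the unmanaged buffer. A sketch of configuring options; the particular config field is chosen only for illustration:

```cs
// Build options carrying a serialized ConfigProto.
var config = new ConfigProto { LogDevicePlacement = true };
using (var opts = new SessionOptions())
{
    opts.SetConfig(config);   // ToByteArray -> AllocHGlobal -> TF_SetConfig -> Free
    // pass `opts` to a Session/BaseSession constructor that accepts SessionOptions
}
```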
| @@ -22,10 +22,8 @@ namespace Tensorflow | |||||
| /// TF_Status holds error information. It either has an OK code, or | /// TF_Status holds error information. It either has an OK code, or | ||||
| /// else an error code with an associated error message. | /// else an error code with an associated error message. | ||||
| /// </summary> | /// </summary> | ||||
| public class Status : IDisposable | |||||
| public class Status : DisposableObject | |||||
| { | { | ||||
| protected IntPtr _handle; | |||||
| /// <summary> | /// <summary> | ||||
| /// Error message | /// Error message | ||||
| /// </summary> | /// </summary> | ||||
| @@ -67,22 +65,7 @@ namespace Tensorflow | |||||
| return status._handle; | return status._handle; | ||||
| } | } | ||||
| public void Dispose() | |||||
| { | |||||
| IntPtr h = IntPtr.Zero; | |||||
| lock (this) | |||||
| { | |||||
| h = _handle; | |||||
| _handle = IntPtr.Zero; | |||||
| } | |||||
| if (h != IntPtr.Zero) | |||||
| c_api.TF_DeleteStatus(h); | |||||
| GC.SuppressFinalize(this); | |||||
| } | |||||
| ~Status() | |||||
| { | |||||
| Dispose(); | |||||
| } | |||||
| protected override void DisposeUnManagedState(IntPtr handle) | |||||
| => c_api.TF_DeleteStatus(handle); | |||||
| } | } | ||||
| } | } | ||||
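`Status`, `SessionOptions`, and (below) `Tensor` now share one unmanaged-handle lifecycle through a `DisposableObject` base class instead of each re-implementing `IDisposable`. The base class itself is not part of this diff; a hypothetical sketch consistent with the overrides above might look like:

```csharp
using System;

// Hypothetical sketch of the shared base implied by this diff;
// the real implementation lives elsewhere in the repository.
public abstract class DisposableObject : IDisposable
{
    protected IntPtr _handle;

    public bool IsDisposed => _handle == IntPtr.Zero;

    // Each subclass frees its own native resource (TF_DeleteStatus, etc.).
    protected abstract void DisposeUnManagedState(IntPtr handle);

    public void Dispose()
    {
        IntPtr h;
        lock (this)               // hand off the handle exactly once
        {
            h = _handle;
            _handle = IntPtr.Zero;
        }
        if (h != IntPtr.Zero)
            DisposeUnManagedState(h);
        GC.SuppressFinalize(this);
    }

    ~DisposableObject() => Dispose();
}
```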
| @@ -55,7 +55,7 @@ namespace Tensorflow.Summaries | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| public Tensor merge(Tensor[] inputs, string[] collections = null, string name = null) | public Tensor merge(Tensor[] inputs, string[] collections = null, string name = null) | ||||
| { | { | ||||
| return with(ops.name_scope(name, "Merge", inputs), delegate | |||||
| return tf_with(ops.name_scope(name, "Merge", inputs), delegate | |||||
| { | { | ||||
| var val = gen_logging_ops.merge_summary(inputs: inputs, name: name); | var val = gen_logging_ops.merge_summary(inputs: inputs, name: name); | ||||
| collect(val, collections?.ToList(), new List<string>()); | collect(val, collections?.ToList(), new List<string>()); | ||||
| @@ -88,7 +88,7 @@ namespace Tensorflow.Summaries | |||||
| public (string, string) summary_scope(string name, string family = null, string default_name = null, Tensor[] values = null) | public (string, string) summary_scope(string name, string family = null, string default_name = null, Tensor[] values = null) | ||||
| { | { | ||||
| string scope_base_name = string.IsNullOrEmpty(family) ? name : $"{family}/{name}"; | string scope_base_name = string.IsNullOrEmpty(family) ? name : $"{family}/{name}"; | ||||
| return with(ops.name_scope(scope_base_name, default_name: default_name, values), scope => | |||||
| return tf_with(ops.name_scope(scope_base_name, default_name: default_name, values), scope => | |||||
| { | { | ||||
| var tag = scope._name_scope; | var tag = scope._name_scope; | ||||
| if (string.IsNullOrEmpty(family)) | if (string.IsNullOrEmpty(family)) | ||||
| @@ -5,7 +5,7 @@ | |||||
| <AssemblyName>TensorFlow.NET</AssemblyName> | <AssemblyName>TensorFlow.NET</AssemblyName> | ||||
| <RootNamespace>Tensorflow</RootNamespace> | <RootNamespace>Tensorflow</RootNamespace> | ||||
| <TargetTensorFlow>1.14.0</TargetTensorFlow> | <TargetTensorFlow>1.14.0</TargetTensorFlow> | ||||
| <Version>0.10.3</Version> | |||||
| <Version>0.10.8</Version> | |||||
| <Authors>Haiping Chen, Meinrad Recheis</Authors> | <Authors>Haiping Chen, Meinrad Recheis</Authors> | ||||
| <Company>SciSharp STACK</Company> | <Company>SciSharp STACK</Company> | ||||
| <GeneratePackageOnBuild>true</GeneratePackageOnBuild> | <GeneratePackageOnBuild>true</GeneratePackageOnBuild> | ||||
| @@ -17,7 +17,7 @@ | |||||
| <PackageTags>TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C#</PackageTags> | <PackageTags>TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C#</PackageTags> | ||||
| <Description>Google's TensorFlow full binding in .NET Standard. | <Description>Google's TensorFlow full binding in .NET Standard. | ||||
| Docs: https://tensorflownet.readthedocs.io</Description> | Docs: https://tensorflownet.readthedocs.io</Description> | ||||
| <AssemblyVersion>0.10.3.0</AssemblyVersion> | |||||
| <AssemblyVersion>0.10.8.0</AssemblyVersion> | |||||
| <PackageReleaseNotes>Changes since v0.9.0: | <PackageReleaseNotes>Changes since v0.9.0: | ||||
| 1. Added full connected Convolution Neural Network example. | 1. Added full connected Convolution Neural Network example. | ||||
| @@ -29,9 +29,15 @@ Docs: https://tensorflownet.readthedocs.io</Description> | |||||
| 7. Add BatchMatMulGrad. | 7. Add BatchMatMulGrad. | ||||
| 8. Upgrade NumSharp. | 8. Upgrade NumSharp. | ||||
| 9. Fix strided_slice_grad type convention error. | 9. Fix strided_slice_grad type convention error. | ||||
| 10. Add AbsGrad.</PackageReleaseNotes> | |||||
| 10. Add AbsGrad. | |||||
| 11. Fix Session.LoadFromSavedModel(string). | |||||
| 12. Add Tensor operator overloads. | |||||
| 13. Fix default graph and operation issue when importing a model. | |||||
| 14. Fix TF_String encode and decode. | |||||
| 15. Fix Tensor memory leak. | |||||
| 16. Rename with to tf_with, which is used only for graph building.</PackageReleaseNotes> | |||||
| <LangVersion>7.2</LangVersion> | <LangVersion>7.2</LangVersion> | ||||
| <FileVersion>0.10.3.0</FileVersion> | |||||
| <FileVersion>0.10.8.0</FileVersion> | |||||
| <PackageLicenseFile>LICENSE</PackageLicenseFile> | <PackageLicenseFile>LICENSE</PackageLicenseFile> | ||||
| <PackageRequireLicenseAcceptance>true</PackageRequireLicenseAcceptance> | <PackageRequireLicenseAcceptance>true</PackageRequireLicenseAcceptance> | ||||
| <SignAssembly>true</SignAssembly> | <SignAssembly>true</SignAssembly> | ||||
| @@ -62,7 +68,7 @@ Docs: https://tensorflownet.readthedocs.io</Description> | |||||
| </ItemGroup> | </ItemGroup> | ||||
| <ItemGroup> | <ItemGroup> | ||||
| <PackageReference Include="Google.Protobuf" Version="3.9.0" /> | |||||
| <PackageReference Include="Google.Protobuf" Version="3.5.1" /> | |||||
| <PackageReference Include="NumSharp" Version="0.10.6" /> | <PackageReference Include="NumSharp" Version="0.10.6" /> | ||||
| </ItemGroup> | </ItemGroup> | ||||
| @@ -15,6 +15,7 @@ | |||||
| ******************************************************************************/ | ******************************************************************************/ | ||||
| using System; | using System; | ||||
| using System.Linq; | |||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace Tensorflow | namespace Tensorflow | ||||
| @@ -63,22 +64,56 @@ namespace Tensorflow | |||||
| public static Tensor operator *(long constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor); | public static Tensor operator *(long constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor); | ||||
| public static Tensor operator *(ulong constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor); | public static Tensor operator *(ulong constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor); | ||||
| public static Tensor operator /(Tensor x, Tensor y) => BinaryOpWrapper("truediv", x, y); | |||||
| public static Tensor operator /(Tensor x, float y) => BinaryOpWrapper("truediv", x, y); | |||||
| private static readonly TF_DataType[] _intTfDataTypes = { | |||||
| TF_DataType.TF_INT8, TF_DataType.TF_INT16, TF_DataType.TF_INT32, TF_DataType.TF_INT64, | |||||
| TF_DataType.TF_QINT8, TF_DataType.TF_QINT16, TF_DataType.TF_QINT32, | |||||
| TF_DataType.TF_UINT8, TF_DataType.TF_UINT16, TF_DataType.TF_UINT32, TF_DataType.TF_UINT64 | |||||
| }; | |||||
| public static Tensor operator /(double x, Tensor y) => BinaryOpWrapper("truediv", x, y); | |||||
| public static Tensor operator /(float x, Tensor y) => BinaryOpWrapper("truediv", x, y); | public static Tensor operator /(float x, Tensor y) => BinaryOpWrapper("truediv", x, y); | ||||
| public static Tensor operator /(int x, Tensor y) => BinaryOpWrapper("floordiv", x, y); | |||||
| public static Tensor operator /(Tensor x, Tensor y) => | |||||
| _intTfDataTypes.Contains(x._dtype) | |||||
| ? BinaryOpWrapper("floordiv", x, y) | |||||
| : BinaryOpWrapper("truediv", x, y); | |||||
| public static Tensor operator /(Tensor x, int y) => BinaryOpWrapper("floordiv", x, y); | |||||
| public static Tensor operator /(Tensor x, float y) => BinaryOpWrapper("truediv", x, y); | |||||
| public static Tensor operator /(Tensor x, double y) => BinaryOpWrapper("truediv", x, y); | public static Tensor operator /(Tensor x, double y) => BinaryOpWrapper("truediv", x, y); | ||||
| public static Tensor operator %(Tensor x, Tensor y) => BinaryOpWrapper("mod", x, y); | public static Tensor operator %(Tensor x, Tensor y) => BinaryOpWrapper("mod", x, y); | ||||
| public static Tensor operator >(double x, Tensor y) => gen_math_ops.greater(x, y); | |||||
| public static Tensor operator >(float x, Tensor y) => gen_math_ops.greater(x, y); | |||||
| public static Tensor operator >(int x, Tensor y) => gen_math_ops.greater(x, y); | |||||
| public static Tensor operator >(Tensor x, Tensor y) => gen_math_ops.greater(x, y); | |||||
| public static Tensor operator >(Tensor x, int y) => gen_math_ops.greater(x, y); | public static Tensor operator >(Tensor x, int y) => gen_math_ops.greater(x, y); | ||||
| public static Tensor operator >=(Tensor x, Tensor y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator >(Tensor x, float y) => gen_math_ops.greater(x, y); | public static Tensor operator >(Tensor x, float y) => gen_math_ops.greater(x, y); | ||||
| public static Tensor operator >(Tensor x, double y) => gen_math_ops.greater(x, y); | public static Tensor operator >(Tensor x, double y) => gen_math_ops.greater(x, y); | ||||
| public static Tensor operator <(double x, Tensor y) => gen_math_ops.less(x, y); | |||||
| public static Tensor operator <(float x, Tensor y) => gen_math_ops.less(x, y); | |||||
| public static Tensor operator <(int x, Tensor y) => gen_math_ops.less(x, y); | |||||
| public static Tensor operator <(Tensor x, Tensor y) => gen_math_ops.less(x, y); | |||||
| public static Tensor operator <(Tensor x, int y) => gen_math_ops.less(x, y); | public static Tensor operator <(Tensor x, int y) => gen_math_ops.less(x, y); | ||||
| public static Tensor operator <=(Tensor x, Tensor y) => gen_math_ops.less_equal(x, y); | |||||
| public static Tensor operator <(Tensor x, float y) => gen_math_ops.less(x, y); | public static Tensor operator <(Tensor x, float y) => gen_math_ops.less(x, y); | ||||
| public static Tensor operator <(Tensor x, double y) => gen_math_ops.less(x, y); | public static Tensor operator <(Tensor x, double y) => gen_math_ops.less(x, y); | ||||
| public static Tensor operator >=(double x, Tensor y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator >=(float x, Tensor y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator >=(int x, Tensor y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator >=(Tensor x, Tensor y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator >=(Tensor x, int y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator >=(Tensor x, float y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator >=(Tensor x, double y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator <=(int x, Tensor y) => gen_math_ops.less_equal(x, y); | |||||
| public static Tensor operator <=(float x, Tensor y) => gen_math_ops.less_equal(x, y); | |||||
| public static Tensor operator <=(double x, Tensor y) => gen_math_ops.less_equal(x, y); | |||||
| public static Tensor operator <=(Tensor x, Tensor y) => gen_math_ops.less_equal(x, y); | |||||
| public static Tensor operator <=(Tensor x, int y) => gen_math_ops.less_equal(x, y); | |||||
| public static Tensor operator <=(Tensor x, float y) => gen_math_ops.less_equal(x, y); | |||||
| public static Tensor operator <=(Tensor x, double y) => gen_math_ops.less_equal(x, y); | |||||
| private static Tensor BinaryOpWrapper<Tx, Ty>(string name, Tx x, Ty y) | private static Tensor BinaryOpWrapper<Tx, Ty>(string name, Tx x, Ty y) | ||||
| { | { | ||||
| TF_DataType dtype = TF_DataType.DtInvalid; | TF_DataType dtype = TF_DataType.DtInvalid; | ||||
| @@ -88,7 +123,7 @@ namespace Tensorflow | |||||
| dtype = tr.dtype.as_base_dtype(); | dtype = tr.dtype.as_base_dtype(); | ||||
| var namescope = ops.name_scope(null, name, new { x, y }); | var namescope = ops.name_scope(null, name, new { x, y }); | ||||
| return with(namescope, scope => | |||||
| return tf_with(namescope, scope => | |||||
| { | { | ||||
| Tensor result = null; | Tensor result = null; | ||||
| var x1 = ops.convert_to_tensor(x, dtype: dtype, name: "x"); | var x1 = ops.convert_to_tensor(x, dtype: dtype, name: "x"); | ||||
| @@ -99,6 +134,9 @@ namespace Tensorflow | |||||
| case "add": | case "add": | ||||
| result = gen_math_ops.add(x1, y1, name: scope); | result = gen_math_ops.add(x1, y1, name: scope); | ||||
| break; | break; | ||||
| case "floordiv": | |||||
| result = gen_math_ops.floor_div(x1, y1, name: scope); | |||||
| break; | |||||
| case "truediv": | case "truediv": | ||||
| result = gen_math_ops.real_div(x1, y1, name: scope); | result = gen_math_ops.real_div(x1, y1, name: scope); | ||||
| break; | break; | ||||
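The net effect of the new overloads above is Python-like division semantics: `/` between integer-dtype tensors lowers to `floordiv`, everything else to `truediv`. A quick illustration, using `tf.constant` as in the examples later in this diff:

```csharp
var a = tf.constant(7);     // TF_INT32
var b = tf.constant(2);
var c = a / b;              // integer dtype -> "floordiv" -> 3

var x = tf.constant(7.0f);  // TF_FLOAT
var y = tf.constant(2.0f);
var z = x / y;              // floating dtype -> "truediv" -> 3.5f
```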
| @@ -19,6 +19,7 @@ using System; | |||||
| using System.Collections.Generic; | using System.Collections.Generic; | ||||
| using System.Linq; | using System.Linq; | ||||
| using System.Runtime.InteropServices; | using System.Runtime.InteropServices; | ||||
| using System.Text; | |||||
| using Tensorflow.Framework; | using Tensorflow.Framework; | ||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| @@ -28,10 +29,8 @@ namespace Tensorflow | |||||
| /// A tensor is a generalization of vectors and matrices to potentially higher dimensions. | /// A tensor is a generalization of vectors and matrices to potentially higher dimensions. | ||||
| /// Internally, TensorFlow represents tensors as n-dimensional arrays of base datatypes. | /// Internally, TensorFlow represents tensors as n-dimensional arrays of base datatypes. | ||||
| /// </summary> | /// </summary> | ||||
| public partial class Tensor : IDisposable, ITensorOrOperation, _TensorLike | |||||
| public partial class Tensor : DisposableObject, ITensorOrOperation, _TensorLike | |||||
| { | { | ||||
| private IntPtr _handle; | |||||
| private int _id; | private int _id; | ||||
| private Operation _op; | private Operation _op; | ||||
| @@ -48,8 +47,6 @@ namespace Tensorflow | |||||
| private int _value_index; | private int _value_index; | ||||
| public int value_index => _value_index; | public int value_index => _value_index; | ||||
| private Status status = new Status(); | |||||
| private TF_DataType _dtype = TF_DataType.DtInvalid; | private TF_DataType _dtype = TF_DataType.DtInvalid; | ||||
| public TF_DataType dtype => _handle == IntPtr.Zero ? _dtype : c_api.TF_TensorType(_handle); | public TF_DataType dtype => _handle == IntPtr.Zero ? _dtype : c_api.TF_TensorType(_handle); | ||||
| @@ -76,6 +73,7 @@ namespace Tensorflow | |||||
| if (_handle == IntPtr.Zero) | if (_handle == IntPtr.Zero) | ||||
| { | { | ||||
| var status = new Status(); | |||||
| c_api.TF_GraphGetTensorShape(op.graph, _as_tf_output(), dims, rank, status); | c_api.TF_GraphGetTensorShape(op.graph, _as_tf_output(), dims, rank, status); | ||||
| status.Check(); | status.Check(); | ||||
| } | } | ||||
| @@ -90,6 +88,8 @@ namespace Tensorflow | |||||
| set | set | ||||
| { | { | ||||
| var status = new Status(); | |||||
| if (value == null) | if (value == null) | ||||
| c_api.TF_GraphSetTensorShape(this.graph, this._as_tf_output(), null, -1, status); | c_api.TF_GraphSetTensorShape(this.graph, this._as_tf_output(), null, -1, status); | ||||
| else | else | ||||
| @@ -131,8 +131,11 @@ namespace Tensorflow | |||||
| { | { | ||||
| if (_handle == IntPtr.Zero) | if (_handle == IntPtr.Zero) | ||||
| { | { | ||||
| var status = new Status(); | |||||
| var output = _as_tf_output(); | var output = _as_tf_output(); | ||||
| return c_api.TF_GraphGetTensorNumDims(op.graph, output, status); | |||||
| int ndim = c_api.TF_GraphGetTensorNumDims(op.graph, output, status); | |||||
| status.Check(); | |||||
| return ndim; | |||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| @@ -184,6 +187,41 @@ namespace Tensorflow | |||||
| return data; | return data; | ||||
| } | } | ||||
| public unsafe string[] StringData() | |||||
| { | |||||
| // | |||||
| // TF_STRING tensors are encoded with a table of 8-byte offsets followed by TF_StringEncode-encoded bytes. | |||||
| // [offset1, offset2,...,offsetn, s1size, s1bytes, s2size, s2bytes,...,snsize,snbytes] | |||||
| // | |||||
| long size = 1; | |||||
| foreach (var s in TensorShape.Dimensions) | |||||
| size *= s; | |||||
| var buffer = new byte[size][]; | |||||
| var src = c_api.TF_TensorData(_handle); | |||||
| var srcLen = (IntPtr)(src.ToInt64() + (long)bytesize); | |||||
| src += (int)(size * 8); | |||||
| for (int i = 0; i < buffer.Length; i++) | |||||
| { | |||||
| using (var status = new Status()) | |||||
| { | |||||
| IntPtr dst = IntPtr.Zero; | |||||
| UIntPtr dstLen = UIntPtr.Zero; | |||||
| var read = c_api.TF_StringDecode((byte*)src, (UIntPtr)(srcLen.ToInt64() - src.ToInt64()), (byte**)&dst, &dstLen, status); | |||||
| status.Check(true); | |||||
| buffer[i] = new byte[(int)dstLen]; | |||||
| Marshal.Copy(dst, buffer[i], 0, buffer[i].Length); | |||||
| src += (int)read; | |||||
| } | |||||
| } | |||||
| var _str = new string[buffer.Length]; | |||||
| for (int i = 0; i < _str.Length; i++) | |||||
| _str[i] = Encoding.UTF8.GetString(buffer[i]); | |||||
| return _str; | |||||
| } | |||||
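For reference, a worked example of the layout `StringData` walks — byte values illustrative, following the TF_StringEncode format of a varint length prefix before each string:

```csharp
// A 1-D TF_STRING tensor holding ["Hi", "Hello"]:
//
//   bytes 0..7   offset of element 0 within the payload -> 0
//   bytes 8..15  offset of element 1 within the payload -> 3
//   payload      0x02 'H' 'i'              (varint length 2, then 2 bytes)
//                0x05 'H' 'e' 'l' 'l' 'o'  (varint length 5, then 5 bytes)
//
// StringData() skips the 8 * n offset table, then lets TF_StringDecode
// report how many bytes each element consumed and advances src by that.
```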
| public Tensor MaybeMove() | public Tensor MaybeMove() | ||||
| { | { | ||||
| var tensor = c_api.TF_TensorMaybeMove(_handle); | var tensor = c_api.TF_TensorMaybeMove(_handle); | ||||
| @@ -262,7 +300,7 @@ namespace Tensorflow | |||||
| index += 1; | index += 1; | ||||
| } | } | ||||
| return with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope => | |||||
| return tf_with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope => | |||||
| { | { | ||||
| string name = scope; | string name = scope; | ||||
| if (begin != null) | if (begin != null) | ||||
| @@ -311,7 +349,7 @@ namespace Tensorflow | |||||
| index += 1; | index += 1; | ||||
| } | } | ||||
| return with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope => | |||||
| return tf_with(ops.name_scope(null, "strided_slice", new { begin, end, strides }), scope => | |||||
| { | { | ||||
| string name = scope; | string name = scope; | ||||
| if (begin != null) | if (begin != null) | ||||
| @@ -354,26 +392,12 @@ namespace Tensorflow | |||||
| return $"tf.Tensor '{name}' shape=({string.Join(",", shape)}) dtype={dtype}"; | return $"tf.Tensor '{name}' shape=({string.Join(",", shape)}) dtype={dtype}"; | ||||
| } | } | ||||
| public void Dispose() | |||||
| protected override void DisposeUnManagedState(IntPtr handle) | |||||
| { | { | ||||
| IntPtr h=IntPtr.Zero; | |||||
| lock (this) | |||||
| if(handle != IntPtr.Zero) | |||||
| { | { | ||||
| h = _handle; | |||||
| _handle=IntPtr.Zero; | |||||
| c_api.TF_DeleteTensor(handle); | |||||
| } | } | ||||
| if (h != IntPtr.Zero) | |||||
| c_api.TF_DeleteTensor(_handle); | |||||
| status.Dispose(); | |||||
| GC.SuppressFinalize(this); | |||||
| } | |||||
| /// <summary> | |||||
| /// Dispose the tensor when it gets garbage collected | |||||
| /// </summary> | |||||
| ~Tensor() | |||||
| { | |||||
| Dispose(); | |||||
| } | } | ||||
| public bool IsDisposed | public bool IsDisposed | ||||
| @@ -32,6 +32,12 @@ namespace Tensorflow | |||||
| [DllImport(TensorFlowLibName)] | [DllImport(TensorFlowLibName)] | ||||
| public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, IntPtr dims, int num_dims, UIntPtr len); | public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, IntPtr dims, int num_dims, UIntPtr len); | ||||
| [DllImport(TensorFlowLibName)] | |||||
| public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, long[] dims, int num_dims, ulong len); | |||||
| [DllImport(TensorFlowLibName)] | |||||
| public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, long[] dims, int num_dims, UIntPtr len); | |||||
| /// <summary> | /// <summary> | ||||
| /// returns the sizeof() for the underlying type corresponding to the given TF_DataType enum value. | /// returns the sizeof() for the underlying type corresponding to the given TF_DataType enum value. | ||||
| /// </summary> | /// </summary> | ||||
| @@ -150,5 +156,8 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| [DllImport(TensorFlowLibName)] | [DllImport(TensorFlowLibName)] | ||||
| public static extern ulong TF_StringDecode(IntPtr src, ulong src_len, IntPtr dst, ref ulong dst_len, IntPtr status); | public static extern ulong TF_StringDecode(IntPtr src, ulong src_len, IntPtr dst, ref ulong dst_len, IntPtr status); | ||||
| [DllImport(TensorFlowLibName)] | |||||
| public static extern unsafe UIntPtr TF_StringDecode(byte* src, UIntPtr src_len, byte** dst, UIntPtr* dst_len, IntPtr status); | |||||
| } | } | ||||
| } | } | ||||
| @@ -81,7 +81,7 @@ namespace Tensorflow.Train | |||||
| var m = get_slot(var, "m"); | var m = get_slot(var, "m"); | ||||
| var m_scaled_g_values = grad * (1 - beta1_t); | var m_scaled_g_values = grad * (1 - beta1_t); | ||||
| var m_t = state_ops.assign(m, m * beta1_t, use_locking: _use_locking); | var m_t = state_ops.assign(m, m * beta1_t, use_locking: _use_locking); | ||||
| with(ops.control_dependencies(new[] { m_t }), delegate | |||||
| tf_with(ops.control_dependencies(new[] { m_t }), delegate | |||||
| { | { | ||||
| m_t = scatter_add(m, indices, m_scaled_g_values); | m_t = scatter_add(m, indices, m_scaled_g_values); | ||||
| }); | }); | ||||
| @@ -89,7 +89,7 @@ namespace Tensorflow.Train | |||||
| var v = get_slot(var, "v"); | var v = get_slot(var, "v"); | ||||
| var v_scaled_g_values = (grad * grad) * (1 - beta2_t); | var v_scaled_g_values = (grad * grad) * (1 - beta2_t); | ||||
| var v_t = state_ops.assign(v, v * beta2_t, use_locking: _use_locking); | var v_t = state_ops.assign(v, v * beta2_t, use_locking: _use_locking); | ||||
| with(ops.control_dependencies(new[] { v_t }), delegate | |||||
| tf_with(ops.control_dependencies(new[] { v_t }), delegate | |||||
| { | { | ||||
| v_t = scatter_add(v, indices, v_scaled_g_values); | v_t = scatter_add(v, indices, v_scaled_g_values); | ||||
| }); | }); | ||||
| @@ -117,7 +117,7 @@ namespace Tensorflow.Train | |||||
| var operations = new List<ITensorOrOperation>(); | var operations = new List<ITensorOrOperation>(); | ||||
| operations.AddRange(update_ops); | operations.AddRange(update_ops); | ||||
| with(ops.control_dependencies(update_ops), delegate | |||||
| tf_with(ops.control_dependencies(update_ops), delegate | |||||
| { | { | ||||
| var (beta1_power, beta2_power) = _get_beta_accumulators(); | var (beta1_power, beta2_power) = _get_beta_accumulators(); | ||||
| ops.colocate_with(beta1_power); | ops.colocate_with(beta1_power); | ||||
| @@ -151,7 +151,7 @@ namespace Tensorflow | |||||
| _create_slots(var_list); | _create_slots(var_list); | ||||
| var update_ops = new List<Operation>(); | var update_ops = new List<Operation>(); | ||||
| return with(ops.name_scope(name, Name), scope => | |||||
| return tf_with(ops.name_scope(name, Name), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| _prepare(); | _prepare(); | ||||
| @@ -162,7 +162,7 @@ namespace Tensorflow | |||||
| continue; | continue; | ||||
| var scope_name = var.op.name; | var scope_name = var.op.name; | ||||
| with(ops.name_scope("update_" + scope_name), scope2 => | |||||
| tf_with(ops.name_scope("update_" + scope_name), scope2 => | |||||
| { | { | ||||
| var op = processor.update_op(this, grad); | var op = processor.update_op(this, grad); | ||||
| update_ops.Add(op); | update_ops.Add(op); | ||||
| @@ -176,7 +176,7 @@ namespace Tensorflow | |||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| with(ops.control_dependencies(new object[] {_finish(update_ops.ToArray(), "update")}), dep => | |||||
| tf_with(ops.control_dependencies(new object[] {_finish(update_ops.ToArray(), "update")}), dep => | |||||
| { | { | ||||
| ops.colocate_with(global_step); | ops.colocate_with(global_step); | ||||
| // TODO: port this if branch once ResourceVariable has been ported! | // TODO: port this if branch once ResourceVariable has been ported! | ||||
| @@ -102,7 +102,7 @@ namespace Tensorflow | |||||
| Tensor save_tensor = null; | Tensor save_tensor = null; | ||||
| Operation restore_op = null; | Operation restore_op = null; | ||||
| return with(ops.name_scope(name, "save", saveables.Select(x => x.op).ToArray()), scope => | |||||
| return tf_with(ops.name_scope(name, "save", saveables.Select(x => x.op).ToArray()), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| @@ -57,7 +57,7 @@ namespace Tensorflow.Train | |||||
| { | { | ||||
| var validate_shape = shape.is_fully_defined(); | var validate_shape = shape.is_fully_defined(); | ||||
| var prefix = primary.op.name; | var prefix = primary.op.name; | ||||
| return with(new variable_scope(string.Empty, prefix + "/" + name), delegate | |||||
| return tf_with(new variable_scope(string.Empty, prefix + "/" + name), delegate | |||||
| { | { | ||||
| return _create_slot_var(primary, initializer, "", validate_shape, shape, dtype); | return _create_slot_var(primary, initializer, "", validate_shape, shape, dtype); | ||||
| }); | }); | ||||
| @@ -32,7 +32,7 @@ namespace Tensorflow | |||||
| private static Tensor op_helper<T>(string default_name, RefVariable x, T y) | private static Tensor op_helper<T>(string default_name, RefVariable x, T y) | ||||
| { | { | ||||
| var tensor1 = x.value(); | var tensor1 = x.value(); | ||||
| return with(ops.name_scope(null, default_name, new { tensor1, y }), scope => { | |||||
| return tf_with(ops.name_scope(null, default_name, new { tensor1, y }), scope => { | |||||
| var tensor2 = ops.convert_to_tensor(y, tensor1.dtype.as_base_dtype(), "y"); | var tensor2 = ops.convert_to_tensor(y, tensor1.dtype.as_base_dtype(), "y"); | ||||
| return gen_math_ops.add(tensor1, tensor2, scope); | return gen_math_ops.add(tensor1, tensor2, scope); | ||||
| }); | }); | ||||
| @@ -134,7 +134,7 @@ namespace Tensorflow | |||||
| ops.init_scope(); | ops.init_scope(); | ||||
| var values = init_from_fn ? new object[0] : new object[] { initial_value }; | var values = init_from_fn ? new object[0] : new object[] { initial_value }; | ||||
| with(ops.name_scope(name, "Variable", values), scope => | |||||
| tf_with(ops.name_scope(name, "Variable", values), scope => | |||||
| { | { | ||||
| name = scope; | name = scope; | ||||
| if (init_from_fn) | if (init_from_fn) | ||||
| @@ -148,7 +148,7 @@ namespace Tensorflow | |||||
| List = new AttrValue.Types.ListValue() | List = new AttrValue.Types.ListValue() | ||||
| }; | }; | ||||
| attr.List.S.Add(ByteString.CopyFromUtf8($"loc:{true_name}")); | attr.List.S.Add(ByteString.CopyFromUtf8($"loc:{true_name}")); | ||||
| with(ops.name_scope("Initializer"), scope2 => | |||||
| tf_with(ops.name_scope("Initializer"), scope2 => | |||||
| { | { | ||||
| _initial_value = (initial_value as Func<Tensor>)(); | _initial_value = (initial_value as Func<Tensor>)(); | ||||
| _initial_value = ops.convert_to_tensor(_initial_value, name: "initial_value", dtype: dtype); | _initial_value = ops.convert_to_tensor(_initial_value, name: "initial_value", dtype: dtype); | ||||
| @@ -56,7 +56,7 @@ namespace Tensorflow | |||||
| VariableAggregation aggregation= VariableAggregation.None) | VariableAggregation aggregation= VariableAggregation.None) | ||||
| { | { | ||||
| string full_name = !string.IsNullOrEmpty(this.name) ? this.name + "/" + name : name; | string full_name = !string.IsNullOrEmpty(this.name) ? this.name + "/" + name : name; | ||||
| return with(ops.name_scope(null), scope => | |||||
| return tf_with(ops.name_scope(null), scope => | |||||
| { | { | ||||
| if (dtype == TF_DataType.DtInvalid) | if (dtype == TF_DataType.DtInvalid) | ||||
| dtype = _dtype; | dtype = _dtype; | ||||
| @@ -295,7 +295,7 @@ namespace Tensorflow | |||||
| // inner_device_stack = default_graph._device_function_stack | // inner_device_stack = default_graph._device_function_stack | ||||
| // var outer_context = default_graph.as_default; | // var outer_context = default_graph.as_default; | ||||
| with(ops.control_dependencies(null), delegate | |||||
| tf_with(ops.control_dependencies(null), delegate | |||||
| { | { | ||||
| var outer_graph = get_default_graph(); | var outer_graph = get_default_graph(); | ||||
| // outer_device_stack = None | // outer_device_stack = None | ||||
| @@ -16,6 +16,8 @@ Here are some pre-built TensorFlow binaries you can use for each platform: | |||||
| - CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-1.14.0.zip | - CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-1.14.0.zip | ||||
| - GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-1.14.0.zip | - GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-1.14.0.zip | ||||
| ### Run in Linux | ### Run in Linux | ||||
| `Install-Package TensorFlow.NET` | `Install-Package TensorFlow.NET` | ||||
| @@ -31,10 +33,21 @@ sudo apt install libgdiplus | |||||
| More information about [System.Drawing on Linux](<https://www.hanselman.com/blog/HowDoYouUseSystemDrawingInNETCore.aspx>). | More information about [System.Drawing on Linux](<https://www.hanselman.com/blog/HowDoYouUseSystemDrawingInNETCore.aspx>). | ||||
| ### Run in Mac OS | ### Run in Mac OS | ||||
| ### GPU Tensorflow for windows | |||||
| Before running verify you installed CUDA and cuDNN | |||||
| ### TensorFlow GPU for Windows | |||||
| Before running, verify that CUDA and cuDNN are installed (TensorFlow v1.14 is compatible with CUDA v10.0 and cuDNN v7.4) and that the installed CUDA version matches your TensorFlow build. | |||||
| ```powershell | |||||
| PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU | |||||
| ``` | |||||
| ### Build from source for Windows | ### Build from source for Windows | ||||
| @@ -77,7 +77,7 @@ let run()= | |||||
| let init = tf.global_variables_initializer() | let init = tf.global_variables_initializer() | ||||
| Tensorflow.Python.``with``(tf.Session(), fun (sess:Session) -> | |||||
| Tensorflow.Python.``tf_with``(tf.Session(), fun (sess:Session) -> | |||||
| sess.run(init) |> ignore | sess.run(init) |> ignore | ||||
| // Loop over epochs | // Loop over epochs | ||||
| for epoch in [0..training_epochs] do | for epoch in [0..training_epochs] do | ||||
| @@ -18,7 +18,7 @@ using NumSharp; | |||||
| using System; | using System; | ||||
| using System.Diagnostics; | using System.Diagnostics; | ||||
| using Tensorflow; | using Tensorflow; | ||||
| using TensorFlowNET.Examples.Utility; | |||||
| using Tensorflow.Hub; | |||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples | namespace TensorFlowNET.Examples | ||||
| @@ -39,7 +39,7 @@ namespace TensorFlowNET.Examples | |||||
| public int? test_size = null; | public int? test_size = null; | ||||
| public int batch_size = 1024; // The number of samples per batch | public int batch_size = 1024; // The number of samples per batch | ||||
| Datasets<DataSetMnist> mnist; | |||||
| Datasets<MnistDataSet> mnist; | |||||
| NDArray full_data_x; | NDArray full_data_x; | ||||
| int num_steps = 20; // Total steps to train | int num_steps = 20; // Total steps to train | ||||
| int k = 25; // The number of clusters | int k = 25; // The number of clusters | ||||
| @@ -52,29 +52,41 @@ namespace TensorFlowNET.Examples | |||||
| { | { | ||||
| PrepareData(); | PrepareData(); | ||||
| var graph = ImportGraph(); | var graph = ImportGraph(); | ||||
| with(tf.Session(graph), sess => | |||||
| using (var sess = tf.Session(graph)) | |||||
| { | { | ||||
| Train(sess); | Train(sess); | ||||
| }); | |||||
| } | |||||
| return accuray_test > 0.70; | return accuray_test > 0.70; | ||||
| } | } | ||||
| public void PrepareData() | public void PrepareData() | ||||
| { | { | ||||
| mnist = MNIST.read_data_sets("mnist", one_hot: true, train_size: train_size, validation_size:validation_size, test_size:test_size); | |||||
| full_data_x = mnist.train.data; | |||||
| var loader = new MnistModelLoader(); | |||||
| var setting = new ModelLoadSetting | |||||
| { | |||||
| TrainDir = ".resources/mnist", | |||||
| OneHot = true, | |||||
| TrainSize = train_size, | |||||
| ValidationSize = validation_size, | |||||
| TestSize = test_size | |||||
| }; | |||||
| mnist = loader.LoadAsync(setting).Result; | |||||
| full_data_x = mnist.Train.Data; | |||||
| // download graph meta data | // download graph meta data | ||||
| string url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/graph/kmeans.meta"; | string url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/graph/kmeans.meta"; | ||||
| Web.Download(url, "graph", "kmeans.meta"); | |||||
| loader.DownloadAsync(url, ".resources/graph", "kmeans.meta").Wait(); | |||||
| } | } | ||||
| public Graph ImportGraph() | public Graph ImportGraph() | ||||
| { | { | ||||
| var graph = tf.Graph().as_default(); | var graph = tf.Graph().as_default(); | ||||
| tf.train.import_meta_graph("graph/kmeans.meta"); | |||||
| tf.train.import_meta_graph(".resources/graph/kmeans.meta"); | |||||
| return graph; | return graph; | ||||
| } | } | ||||
| @@ -132,7 +144,7 @@ namespace TensorFlowNET.Examples | |||||
| sw.Start(); | sw.Start(); | ||||
| foreach (var i in range(idx.Length)) | foreach (var i in range(idx.Length)) | ||||
| { | { | ||||
| var x = mnist.train.labels[i]; | |||||
| var x = mnist.Train.Labels[i]; | |||||
| counts[idx[i]] += x; | counts[idx[i]] += x; | ||||
| } | } | ||||
| @@ -153,7 +165,7 @@ namespace TensorFlowNET.Examples | |||||
| var accuracy_op = tf.reduce_mean(cast); | var accuracy_op = tf.reduce_mean(cast); | ||||
| // Test Model | // Test Model | ||||
| var (test_x, test_y) = (mnist.test.data, mnist.test.labels); | |||||
| var (test_x, test_y) = (mnist.Test.Data, mnist.Test.Labels); | |||||
| result = sess.run(accuracy_op, new FeedItem(X, test_x), new FeedItem(Y, test_y)); | result = sess.run(accuracy_op, new FeedItem(X, test_x), new FeedItem(Y, test_y)); | ||||
| accuray_test = result; | accuray_test = result; | ||||
| print($"Test Accuracy: {accuray_test}"); | print($"Test Accuracy: {accuray_test}"); | ||||
| @@ -71,7 +71,7 @@ namespace TensorFlowNET.Examples | |||||
| var init = tf.global_variables_initializer(); | var init = tf.global_variables_initializer(); | ||||
| // Start training | // Start training | ||||
| return with(tf.Session(), sess => | |||||
| using (var sess = tf.Session()) | |||||
| { | { | ||||
| // Run the initializer | // Run the initializer | ||||
| sess.run(init); | sess.run(init); | ||||
| @@ -114,7 +114,7 @@ namespace TensorFlowNET.Examples | |||||
| Console.WriteLine($"Absolute mean square loss difference: {diff}"); | Console.WriteLine($"Absolute mean square loss difference: {diff}"); | ||||
| return diff < 0.01; | return diff < 0.01; | ||||
| }); | |||||
| } | |||||
| } | } | ||||
| public void PrepareData() | public void PrepareData() | ||||
| @@ -19,7 +19,7 @@ using System; | |||||
| using System.Diagnostics; | using System.Diagnostics; | ||||
| using System.IO; | using System.IO; | ||||
| using Tensorflow; | using Tensorflow; | ||||
| using TensorFlowNET.Examples.Utility; | |||||
| using Tensorflow.Hub; | |||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples | namespace TensorFlowNET.Examples | ||||
| @@ -45,7 +45,7 @@ namespace TensorFlowNET.Examples | |||||
| private float learning_rate = 0.01f; | private float learning_rate = 0.01f; | ||||
| private int display_step = 1; | private int display_step = 1; | ||||
| Datasets<DataSetMnist> mnist; | |||||
| Datasets<MnistDataSet> mnist; | |||||
| public bool Run() | public bool Run() | ||||
| { | { | ||||
| @@ -73,7 +73,7 @@ namespace TensorFlowNET.Examples | |||||
| var sw = new Stopwatch(); | var sw = new Stopwatch(); | ||||
| return with(tf.Session(), sess => | |||||
| using (var sess = tf.Session()) | |||||
| { | { | ||||
| // Run the initializer | // Run the initializer | ||||
| sess.run(init); | sess.run(init); | ||||
| @@ -84,11 +84,11 @@ namespace TensorFlowNET.Examples | |||||
| sw.Start(); | sw.Start(); | ||||
| var avg_cost = 0.0f; | var avg_cost = 0.0f; | ||||
| var total_batch = mnist.train.num_examples / batch_size; | |||||
| var total_batch = mnist.Train.NumOfExamples / batch_size; | |||||
| // Loop over all batches | // Loop over all batches | ||||
| foreach (var i in range(total_batch)) | foreach (var i in range(total_batch)) | ||||
| { | { | ||||
| var (batch_xs, batch_ys) = mnist.train.next_batch(batch_size); | |||||
| var (batch_xs, batch_ys) = mnist.Train.GetNextBatch(batch_size); | |||||
| // Run optimization op (backprop) and cost op (to get loss value) | // Run optimization op (backprop) and cost op (to get loss value) | ||||
| var result = sess.run(new object[] { optimizer, cost }, | var result = sess.run(new object[] { optimizer, cost }, | ||||
| new FeedItem(x, batch_xs), | new FeedItem(x, batch_xs), | ||||
| @@ -115,32 +115,32 @@ namespace TensorFlowNET.Examples | |||||
| var correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)); | var correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)); | ||||
| // Calculate accuracy | // Calculate accuracy | ||||
| var accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)); | var accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)); | ||||
| float acc = accuracy.eval(new FeedItem(x, mnist.test.data), new FeedItem(y, mnist.test.labels)); | |||||
| float acc = accuracy.eval(new FeedItem(x, mnist.Test.Data), new FeedItem(y, mnist.Test.Labels)); | |||||
| print($"Accuracy: {acc.ToString("F4")}"); | print($"Accuracy: {acc.ToString("F4")}"); | ||||
| return acc > 0.9; | return acc > 0.9; | ||||
| }); | |||||
| } | |||||
| } | } | ||||
| public void PrepareData() | public void PrepareData() | ||||
| { | { | ||||
| mnist = MNIST.read_data_sets("mnist", one_hot: true, train_size: train_size, validation_size: validation_size, test_size: test_size); | |||||
| mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, trainSize: train_size, validationSize: validation_size, testSize: test_size).Result; | |||||
| } | } | ||||
| public void SaveModel(Session sess) | public void SaveModel(Session sess) | ||||
| { | { | ||||
| var saver = tf.train.Saver(); | var saver = tf.train.Saver(); | ||||
| var save_path = saver.save(sess, "logistic_regression/model.ckpt"); | |||||
| tf.train.write_graph(sess.graph, "logistic_regression", "model.pbtxt", as_text: true); | |||||
| var save_path = saver.save(sess, ".resources/logistic_regression/model.ckpt"); | |||||
| tf.train.write_graph(sess.graph, ".resources/logistic_regression", "model.pbtxt", as_text: true); | |||||
| FreezeGraph.freeze_graph(input_graph: "logistic_regression/model.pbtxt", | |||||
| FreezeGraph.freeze_graph(input_graph: ".resources/logistic_regression/model.pbtxt", | |||||
| input_saver: "", | input_saver: "", | ||||
| input_binary: false, | input_binary: false, | ||||
| input_checkpoint: "logistic_regression/model.ckpt", | |||||
| input_checkpoint: ".resources/logistic_regression/model.ckpt", | |||||
| output_node_names: "Softmax", | output_node_names: "Softmax", | ||||
| restore_op_name: "save/restore_all", | restore_op_name: "save/restore_all", | ||||
| filename_tensor_name: "save/Const:0", | filename_tensor_name: "save/Const:0", | ||||
| output_graph: "logistic_regression/model.pb", | |||||
| output_graph: ".resources/logistic_regression/model.pb", | |||||
| clear_devices: true, | clear_devices: true, | ||||
| initializer_nodes: ""); | initializer_nodes: ""); | ||||
| } | } | ||||
| @@ -148,7 +148,7 @@ namespace TensorFlowNET.Examples | |||||
| public void Predict(Session sess) | public void Predict(Session sess) | ||||
| { | { | ||||
| var graph = new Graph().as_default(); | var graph = new Graph().as_default(); | ||||
| graph.Import(Path.Join("logistic_regression", "model.pb")); | |||||
| graph.Import(Path.Join(".resources/logistic_regression", "model.pb")); | |||||
| // restoring the model | // restoring the model | ||||
| // var saver = tf.train.import_meta_graph("logistic_regression/tensorflowModel.ckpt.meta"); | // var saver = tf.train.import_meta_graph("logistic_regression/tensorflowModel.ckpt.meta"); | ||||
| @@ -159,7 +159,7 @@ namespace TensorFlowNET.Examples | |||||
| var input = x.outputs[0]; | var input = x.outputs[0]; | ||||
| // predict | // predict | ||||
| var (batch_xs, batch_ys) = mnist.train.next_batch(10); | |||||
| var (batch_xs, batch_ys) = mnist.Train.GetNextBatch(10); | |||||
| var results = sess.run(output, new FeedItem(input, batch_xs[np.arange(1)])); | var results = sess.run(output, new FeedItem(input, batch_xs[np.arange(1)])); | ||||
| if (results.argmax() == (batch_ys[0] as NDArray).argmax()) | if (results.argmax() == (batch_ys[0] as NDArray).argmax()) | ||||
| @@ -48,14 +48,14 @@ namespace TensorFlowNET.Examples | |||||
| float y_max = X.amax(0).Data<float>(1) + 0.5f; | float y_max = X.amax(0).Data<float>(1) + 0.5f; | ||||
| var (xx, yy) = np.meshgrid(np.linspace(x_min, x_max, 30), np.linspace(y_min, y_max, 30)); | var (xx, yy) = np.meshgrid(np.linspace(x_min, x_max, 30), np.linspace(y_min, y_max, 30)); | ||||
| with(tf.Session(), sess => | |||||
| using (var sess = tf.Session()) | |||||
| { | { | ||||
| //var samples = np.vstack<float>(xx.ravel(), yy.ravel()); | //var samples = np.vstack<float>(xx.ravel(), yy.ravel()); | ||||
| //samples = np.transpose(samples); | //samples = np.transpose(samples); | ||||
| var array = np.Load<double[,]>(Path.Join("nb", "nb_example.npy")); | var array = np.Load<double[,]>(Path.Join("nb", "nb_example.npy")); | ||||
| var samples = np.array(array).astype(np.float32); | var samples = np.array(array).astype(np.float32); | ||||
| var Z = sess.run(predict(samples)); | var Z = sess.run(predict(samples)); | ||||
| }); | |||||
| } | |||||
| return true; | return true; | ||||
| } | } | ||||
| @@ -17,7 +17,7 @@ | |||||
| using NumSharp; | using NumSharp; | ||||
| using System; | using System; | ||||
| using Tensorflow; | using Tensorflow; | ||||
| using TensorFlowNET.Examples.Utility; | |||||
| using Tensorflow.Hub; | |||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples | namespace TensorFlowNET.Examples | ||||
| @@ -31,7 +31,7 @@ namespace TensorFlowNET.Examples | |||||
| { | { | ||||
| public bool Enabled { get; set; } = true; | public bool Enabled { get; set; } = true; | ||||
| public string Name => "Nearest Neighbor"; | public string Name => "Nearest Neighbor"; | ||||
| Datasets<DataSetMnist> mnist; | |||||
| Datasets<MnistDataSet> mnist; | |||||
| NDArray Xtr, Ytr, Xte, Yte; | NDArray Xtr, Ytr, Xte, Yte; | ||||
| public int? TrainSize = null; | public int? TrainSize = null; | ||||
| public int ValidationSize = 5000; | public int ValidationSize = 5000; | ||||
| @@ -54,7 +54,7 @@ namespace TensorFlowNET.Examples | |||||
| float accuracy = 0f; | float accuracy = 0f; | ||||
| // Initialize the variables (i.e. assign their default value) | // Initialize the variables (i.e. assign their default value) | ||||
| var init = tf.global_variables_initializer(); | var init = tf.global_variables_initializer(); | ||||
| with(tf.Session(), sess => | |||||
| using (var sess = tf.Session()) | |||||
| { | { | ||||
| // Run the initializer | // Run the initializer | ||||
| sess.run(init); | sess.run(init); | ||||
| @@ -77,17 +77,17 @@ namespace TensorFlowNET.Examples | |||||
| } | } | ||||
| print($"Accuracy: {accuracy}"); | print($"Accuracy: {accuracy}"); | ||||
| }); | |||||
| } | |||||
| return accuracy > 0.8; | return accuracy > 0.8; | ||||
| } | } | ||||
| public void PrepareData() | public void PrepareData() | ||||
| { | { | ||||
| mnist = MNIST.read_data_sets("mnist", one_hot: true, train_size: TrainSize, validation_size:ValidationSize, test_size:TestSize); | |||||
| mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, trainSize: TrainSize, validationSize: ValidationSize, testSize: TestSize).Result; | |||||
| // In this example, we limit mnist data | // In this example, we limit mnist data | ||||
| (Xtr, Ytr) = mnist.train.next_batch(TrainSize==null ? 5000 : TrainSize.Value / 100); // 5000 for training (nn candidates) | |||||
| (Xte, Yte) = mnist.test.next_batch(TestSize==null ? 200 : TestSize.Value / 100); // 200 for testing | |||||
| (Xtr, Ytr) = mnist.Train.GetNextBatch(TrainSize == null ? 5000 : TrainSize.Value / 100); // 5000 for training (nn candidates) | |||||
| (Xte, Yte) = mnist.Test.GetNextBatch(TestSize == null ? 200 : TestSize.Value / 100); // 200 for testing | |||||
| } | } | ||||
| public Graph ImportGraph() | public Graph ImportGraph() | ||||
| @@ -90,7 +90,7 @@ namespace TensorFlowNET.Examples | |||||
| var init = tf.global_variables_initializer(); | var init = tf.global_variables_initializer(); | ||||
| float loss_value = 0; | float loss_value = 0; | ||||
| // Start tf session | // Start tf session | ||||
| with(tf.Session(graph), sess => | |||||
| using (var sess = tf.Session(graph)) | |||||
| { | { | ||||
| sess.run(init); | sess.run(init); | ||||
| var step = 0; | var step = 0; | ||||
| @@ -110,7 +110,7 @@ namespace TensorFlowNET.Examples | |||||
| Console.WriteLine($"Step {step} loss: {loss_value}"); | Console.WriteLine($"Step {step} loss: {loss_value}"); | ||||
| } | } | ||||
| Console.WriteLine($"Final loss: {loss_value}"); | Console.WriteLine($"Final loss: {loss_value}"); | ||||
| }); | |||||
| } | |||||
| return loss_value; | return loss_value; | ||||
| } | } | ||||
| @@ -128,7 +128,7 @@ namespace TensorFlowNET.Examples | |||||
| float loss_value = 0; | float loss_value = 0; | ||||
| // Start tf session | // Start tf session | ||||
| with(tf.Session(graph), sess => | |||||
| using (var sess = tf.Session(graph)) | |||||
| { | { | ||||
| sess.run(init); | sess.run(init); | ||||
| var step = 0; | var step = 0; | ||||
| @@ -143,7 +143,7 @@ namespace TensorFlowNET.Examples | |||||
| Console.WriteLine($"Step {step} loss: {loss_value}"); | Console.WriteLine($"Step {step} loss: {loss_value}"); | ||||
| } | } | ||||
| Console.WriteLine($"Final loss: {loss_value}"); | Console.WriteLine($"Final loss: {loss_value}"); | ||||
| }); | |||||
| } | |||||
| return loss_value; | return loss_value; | ||||
| } | } | ||||
| @@ -134,7 +134,7 @@ namespace TensorFlowNET.Examples | |||||
| 3, 3, 2)); | 3, 3, 2)); | ||||
| var batchMul = tf.batch_matmul(firstTensor, secondTensor); | var batchMul = tf.batch_matmul(firstTensor, secondTensor); | ||||
| var checkTensor = np.array<float>(0, 6, 0, 15, 0, 24, 3, 1, 6, 4, 9, 7, 6, 0, 15, 0, 24, 0); | var checkTensor = np.array<float>(0, 6, 0, 15, 0, 24, 3, 1, 6, 4, 9, 7, 6, 0, 15, 0, 24, 0); | ||||
| return with(tf.Session(), sess => | |||||
| using (var sess = tf.Session()) | |||||
| { | { | ||||
| var result = sess.run(batchMul); | var result = sess.run(batchMul); | ||||
| Console.WriteLine(result.ToString()); | Console.WriteLine(result.ToString()); | ||||
| @@ -152,7 +152,7 @@ namespace TensorFlowNET.Examples | |||||
| // [24, 0]]]) | // [24, 0]]]) | ||||
| return np.reshape(result, 18) | return np.reshape(result, 18) | ||||
| .array_equal(checkTensor); | .array_equal(checkTensor); | ||||
| }); | |||||
| } | |||||
| } | } | ||||
| public void PrepareData() | public void PrepareData() | ||||
| @@ -25,13 +25,13 @@ namespace TensorFlowNET.Examples | |||||
| var hello = tf.constant(str); | var hello = tf.constant(str); | ||||
| // Start tf session | // Start tf session | ||||
| return with(tf.Session(), sess => | |||||
| using (var sess = tf.Session()) | |||||
| { | { | ||||
| // Run the op | // Run the op | ||||
| var result = sess.run(hello); | var result = sess.run(hello); | ||||
| Console.WriteLine(result.ToString()); | Console.WriteLine(result.ToString()); | ||||
| return result.ToString().Equals(str); | return result.ToString().Equals(str); | ||||
| }); | |||||
| } | |||||
| } | } | ||||
| public void PrepareData() | public void PrepareData() | ||||
| @@ -0,0 +1,74 @@ | |||||
| /***************************************************************************** | |||||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| you may not use this file except in compliance with the License. | |||||
| You may obtain a copy of the License at | |||||
| http://www.apache.org/licenses/LICENSE-2.0 | |||||
| Unless required by applicable law or agreed to in writing, software | |||||
| distributed under the License is distributed on an "AS IS" BASIS, | |||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| See the License for the specific language governing permissions and | |||||
| limitations under the License. | |||||
| ******************************************************************************/ | |||||
| using System; | |||||
| using System.Collections.Generic; | |||||
| using System.Text; | |||||
| using Tensorflow; | |||||
| using TensorFlowDatasets; | |||||
| using static Tensorflow.Python; | |||||
| namespace TensorFlowNET.Examples | |||||
| { | |||||
| /// <summary> | |||||
| /// https://www.tensorflow.org/tutorials/images/deep_cnn | |||||
| /// </summary> | |||||
| public class CIFAR10_CNN : IExample | |||||
| { | |||||
| public bool Enabled { get; set; } = true; | |||||
| public bool IsImportingGraph { get; set; } = false; | |||||
| public string Name => "CIFAR-10 CNN"; | |||||
| public bool Run() | |||||
| { | |||||
| PrepareData(); | |||||
| return true; | |||||
| } | |||||
| public Graph BuildGraph() | |||||
| { | |||||
| throw new NotImplementedException(); | |||||
| } | |||||
| public Graph ImportGraph() | |||||
| { | |||||
| throw new NotImplementedException(); | |||||
| } | |||||
| public void Predict(Session sess) | |||||
| { | |||||
| throw new NotImplementedException(); | |||||
| } | |||||
| public void PrepareData() | |||||
| { | |||||
| var tfds = new DatasetBuilder(); | |||||
| tfds.download_and_prepare(); | |||||
| } | |||||
| public void Test(Session sess) | |||||
| { | |||||
| throw new NotImplementedException(); | |||||
| } | |||||
| public void Train(Session sess) | |||||
| { | |||||
| throw new NotImplementedException(); | |||||
| } | |||||
| } | |||||
| } | |||||
| @@ -16,11 +16,12 @@ | |||||
| using NumSharp; | using NumSharp; | ||||
| using System; | using System; | ||||
| using System.Diagnostics; | |||||
| using Tensorflow; | using Tensorflow; | ||||
| using TensorFlowNET.Examples.Utility; | |||||
| using Tensorflow.Hub; | |||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples.ImageProcess | |||||
| namespace TensorFlowNET.Examples | |||||
| { | { | ||||
| /// <summary> | /// <summary> | ||||
| /// Convolutional Neural Network classifier for Hand Written Digits | /// Convolutional Neural Network classifier for Hand Written Digits | ||||
| @@ -45,7 +46,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| int epochs = 5; // accuracy > 98% | int epochs = 5; // accuracy > 98% | ||||
| int batch_size = 100; | int batch_size = 100; | ||||
| float learning_rate = 0.001f; | float learning_rate = 0.001f; | ||||
| Datasets<DataSetMnist> mnist; | |||||
| Datasets<MnistDataSet> mnist; | |||||
| // Network configuration | // Network configuration | ||||
| // 1st Convolutional Layer | // 1st Convolutional Layer | ||||
| @@ -78,11 +79,11 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| PrepareData(); | PrepareData(); | ||||
| BuildGraph(); | BuildGraph(); | ||||
| with(tf.Session(), sess => | |||||
| using (var sess = tf.Session()) | |||||
| { | { | ||||
| Train(sess); | Train(sess); | ||||
| Test(sess); | Test(sess); | ||||
| }); | |||||
| } | |||||
| return loss_test < 0.05 && accuracy_test > 0.98; | return loss_test < 0.05 && accuracy_test > 0.98; | ||||
| } | } | ||||
| @@ -91,7 +92,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| { | { | ||||
| var graph = new Graph().as_default(); | var graph = new Graph().as_default(); | ||||
| with(tf.name_scope("Input"), delegate | |||||
| tf_with(tf.name_scope("Input"), delegate | |||||
| { | { | ||||
| // Placeholders for inputs (x) and outputs(y) | // Placeholders for inputs (x) and outputs(y) | ||||
| x = tf.placeholder(tf.float32, shape: (-1, img_h, img_w, n_channels), name: "X"); | x = tf.placeholder(tf.float32, shape: (-1, img_h, img_w, n_channels), name: "X"); | ||||
| @@ -106,25 +107,25 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| var fc1 = fc_layer(layer_flat, h1, "FC1", use_relu: true); | var fc1 = fc_layer(layer_flat, h1, "FC1", use_relu: true); | ||||
| var output_logits = fc_layer(fc1, n_classes, "OUT", use_relu: false); | var output_logits = fc_layer(fc1, n_classes, "OUT", use_relu: false); | ||||
| with(tf.variable_scope("Train"), delegate | |||||
| tf_with(tf.variable_scope("Train"), delegate | |||||
| { | { | ||||
| with(tf.variable_scope("Loss"), delegate | |||||
| tf_with(tf.variable_scope("Loss"), delegate | |||||
| { | { | ||||
| loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: output_logits), name: "loss"); | loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: output_logits), name: "loss"); | ||||
| }); | }); | ||||
| with(tf.variable_scope("Optimizer"), delegate | |||||
| tf_with(tf.variable_scope("Optimizer"), delegate | |||||
| { | { | ||||
| optimizer = tf.train.AdamOptimizer(learning_rate: learning_rate, name: "Adam-op").minimize(loss); | optimizer = tf.train.AdamOptimizer(learning_rate: learning_rate, name: "Adam-op").minimize(loss); | ||||
| }); | }); | ||||
| with(tf.variable_scope("Accuracy"), delegate | |||||
| tf_with(tf.variable_scope("Accuracy"), delegate | |||||
| { | { | ||||
| var correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name: "correct_pred"); | var correct_prediction = tf.equal(tf.argmax(output_logits, 1), tf.argmax(y, 1), name: "correct_pred"); | ||||
| accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name: "accuracy"); | accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name: "accuracy"); | ||||
| }); | }); | ||||
| with(tf.variable_scope("Prediction"), delegate | |||||
| tf_with(tf.variable_scope("Prediction"), delegate | |||||
| { | { | ||||
| cls_prediction = tf.argmax(output_logits, axis: 1, name: "predictions"); | cls_prediction = tf.argmax(output_logits, axis: 1, name: "predictions"); | ||||
| }); | }); | ||||
| @@ -144,6 +145,8 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| float loss_val = 100.0f; | float loss_val = 100.0f; | ||||
| float accuracy_val = 0f; | float accuracy_val = 0f; | ||||
| var sw = new Stopwatch(); | |||||
| sw.Start(); | |||||
| foreach (var epoch in range(epochs)) | foreach (var epoch in range(epochs)) | ||||
| { | { | ||||
| print($"Training epoch: {epoch + 1}"); | print($"Training epoch: {epoch + 1}"); | ||||
| @@ -165,7 +168,8 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_batch), new FeedItem(y, y_batch)); | var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_batch), new FeedItem(y, y_batch)); | ||||
| loss_val = result[0]; | loss_val = result[0]; | ||||
| accuracy_val = result[1]; | accuracy_val = result[1]; | ||||
| print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")}"); | |||||
| print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")} {sw.ElapsedMilliseconds}ms"); | |||||
| sw.Restart(); | |||||
| } | } | ||||
| } | } | ||||
| @@ -200,7 +204,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| /// <returns>The output array</returns> | /// <returns>The output array</returns> | ||||
| private Tensor conv_layer(Tensor x, int filter_size, int num_filters, int stride, string name) | private Tensor conv_layer(Tensor x, int filter_size, int num_filters, int stride, string name) | ||||
| { | { | ||||
| return with(tf.variable_scope(name), delegate { | |||||
| return tf_with(tf.variable_scope(name), delegate { | |||||
| var num_in_channel = x.shape[x.NDims - 1]; | var num_in_channel = x.shape[x.NDims - 1]; | ||||
| var shape = new[] { filter_size, filter_size, num_in_channel, num_filters }; | var shape = new[] { filter_size, filter_size, num_in_channel, num_filters }; | ||||
| @@ -240,7 +244,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| /// <returns>flattened array</returns> | /// <returns>flattened array</returns> | ||||
| private Tensor flatten_layer(Tensor layer) | private Tensor flatten_layer(Tensor layer) | ||||
| { | { | ||||
| return with(tf.variable_scope("Flatten_layer"), delegate | |||||
| return tf_with(tf.variable_scope("Flatten_layer"), delegate | |||||
| { | { | ||||
| var layer_shape = layer.TensorShape; | var layer_shape = layer.TensorShape; | ||||
| var num_features = layer_shape[new Slice(1, 4)].Size; | var num_features = layer_shape[new Slice(1, 4)].Size; | ||||
| @@ -289,7 +293,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| /// <returns>The output array</returns> | /// <returns>The output array</returns> | ||||
| private Tensor fc_layer(Tensor x, int num_units, string name, bool use_relu = true) | private Tensor fc_layer(Tensor x, int num_units, string name, bool use_relu = true) | ||||
| { | { | ||||
| return with(tf.variable_scope(name), delegate | |||||
| return tf_with(tf.variable_scope(name), delegate | |||||
| { | { | ||||
| var in_dim = x.shape[1]; | var in_dim = x.shape[1]; | ||||
| @@ -306,14 +310,14 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| public void PrepareData() | public void PrepareData() | ||||
| { | { | ||||
| mnist = MNIST.read_data_sets("mnist", one_hot: true); | |||||
| (x_train, y_train) = Reformat(mnist.train.data, mnist.train.labels); | |||||
| (x_valid, y_valid) = Reformat(mnist.validation.data, mnist.validation.labels); | |||||
| (x_test, y_test) = Reformat(mnist.test.data, mnist.test.labels); | |||||
| mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true).Result; | |||||
| (x_train, y_train) = Reformat(mnist.Train.Data, mnist.Train.Labels); | |||||
| (x_valid, y_valid) = Reformat(mnist.Validation.Data, mnist.Validation.Labels); | |||||
| (x_test, y_test) = Reformat(mnist.Test.Data, mnist.Test.Labels); | |||||
| print("Size of:"); | print("Size of:"); | ||||
| print($"- Training-set:\t\t{len(mnist.train.data)}"); | |||||
| print($"- Validation-set:\t{len(mnist.validation.data)}"); | |||||
| print($"- Training-set:\t\t{len(mnist.Train.Data)}"); | |||||
| print($"- Validation-set:\t{len(mnist.Validation.Data)}"); | |||||
| } | } | ||||
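Data loading moves from the old `MNIST.read_data_sets` helper to `Tensorflow.Hub`'s `MnistModelLoader`, whose async loader returns a dataset object exposing PascalCase `Train`/`Validation`/`Test` splits. A minimal sketch of the new call, using only members visible in this diff:

```csharp
using Tensorflow.Hub;
using static Tensorflow.Python; // supplies print() and len()

// First call downloads and extracts MNIST into .resources/mnist;
// .Result blocks on the async loader, as the examples here do.
var mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true).Result;

print($"- Training-set:\t\t{len(mnist.Train.Data)}");
print($"- Validation-set:\t{len(mnist.Validation.Data)}");
print($"- Test-set:\t\t{len(mnist.Test.Data)}");
```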
| /// <summary> | /// <summary> | ||||
| @@ -17,10 +17,10 @@ | |||||
| using NumSharp; | using NumSharp; | ||||
| using System; | using System; | ||||
| using Tensorflow; | using Tensorflow; | ||||
| using TensorFlowNET.Examples.Utility; | |||||
| using Tensorflow.Hub; | |||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples.ImageProcess | |||||
| namespace TensorFlowNET.Examples | |||||
| { | { | ||||
| /// <summary> | /// <summary> | ||||
| /// Neural Network classifier for Hand Written Digits | /// Neural Network classifier for Hand Written Digits | ||||
| @@ -44,7 +44,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| int batch_size = 100; | int batch_size = 100; | ||||
| float learning_rate = 0.001f; | float learning_rate = 0.001f; | ||||
| int h1 = 200; // number of nodes in the 1st hidden layer | int h1 = 200; // number of nodes in the 1st hidden layer | ||||
| Datasets<DataSetMnist> mnist; | |||||
| Datasets<MnistDataSet> mnist; | |||||
| Tensor x, y; | Tensor x, y; | ||||
| Tensor loss, accuracy; | Tensor loss, accuracy; | ||||
| @@ -59,11 +59,11 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| PrepareData(); | PrepareData(); | ||||
| BuildGraph(); | BuildGraph(); | ||||
| with(tf.Session(), sess => | |||||
| using (var sess = tf.Session()) | |||||
| { | { | ||||
| Train(sess); | Train(sess); | ||||
| Test(sess); | Test(sess); | ||||
| }); | |||||
| }; | |||||
| return loss_test < 0.09 && accuracy_test > 0.95; | return loss_test < 0.09 && accuracy_test > 0.95; | ||||
| } | } | ||||
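The session wrapper changes the same way throughout the examples: the callback-style `with(tf.Session(), sess => ...)` becomes an ordinary C# `using` block, which works because `Session` is disposable. Side by side:

```csharp
// Before (removed): callback style.
// with(tf.Session(), sess =>
// {
//     Train(sess);
//     Test(sess);
// });

// After: Session is disposable, so a using block releases the
// native session handle deterministically when the block exits.
using (var sess = tf.Session())
{
    Train(sess);
    Test(sess);
}
```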
| @@ -121,13 +121,13 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| public void PrepareData() | public void PrepareData() | ||||
| { | { | ||||
| mnist = MNIST.read_data_sets("mnist", one_hot: true); | |||||
| mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true).Result; | |||||
| } | } | ||||
| public void Train(Session sess) | public void Train(Session sess) | ||||
| { | { | ||||
| // Number of training iterations in each epoch | // Number of training iterations in each epoch | ||||
| var num_tr_iter = mnist.train.labels.len / batch_size; | |||||
| var num_tr_iter = mnist.Train.Labels.len / batch_size; | |||||
| var init = tf.global_variables_initializer(); | var init = tf.global_variables_initializer(); | ||||
| sess.run(init); | sess.run(init); | ||||
| @@ -139,13 +139,13 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| { | { | ||||
| print($"Training epoch: {epoch + 1}"); | print($"Training epoch: {epoch + 1}"); | ||||
| // Randomly shuffle the training data at the beginning of each epoch | // Randomly shuffle the training data at the beginning of each epoch | ||||
| var (x_train, y_train) = randomize(mnist.train.data, mnist.train.labels); | |||||
| var (x_train, y_train) = mnist.Randomize(mnist.Train.Data, mnist.Train.Labels); | |||||
| foreach (var iteration in range(num_tr_iter)) | foreach (var iteration in range(num_tr_iter)) | ||||
| { | { | ||||
| var start = iteration * batch_size; | var start = iteration * batch_size; | ||||
| var end = (iteration + 1) * batch_size; | var end = (iteration + 1) * batch_size; | ||||
| var (x_batch, y_batch) = get_next_batch(x_train, y_train, start, end); | |||||
| var (x_batch, y_batch) = mnist.GetNextBatch(x_train, y_train, start, end); | |||||
| // Run optimization op (backprop) | // Run optimization op (backprop) | ||||
| sess.run(optimizer, new FeedItem(x, x_batch), new FeedItem(y, y_batch)); | sess.run(optimizer, new FeedItem(x, x_batch), new FeedItem(y, y_batch)); | ||||
| @@ -161,7 +161,8 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| } | } | ||||
| // Run validation after every epoch | // Run validation after every epoch | ||||
| var results1 = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.validation.data), new FeedItem(y, mnist.validation.labels)); | |||||
| var results1 = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.Validation.Data), new FeedItem(y, mnist.Validation.Labels)); | |||||
| loss_val = results1[0]; | loss_val = results1[0]; | ||||
| accuracy_val = results1[1]; | accuracy_val = results1[1]; | ||||
| print("---------------------------------------------------------"); | print("---------------------------------------------------------"); | ||||
| @@ -172,35 +173,12 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| public void Test(Session sess) | public void Test(Session sess) | ||||
| { | { | ||||
| var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.test.data), new FeedItem(y, mnist.test.labels)); | |||||
| var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.Test.Data), new FeedItem(y, mnist.Test.Labels)); | |||||
| loss_test = result[0]; | loss_test = result[0]; | ||||
| accuracy_test = result[1]; | accuracy_test = result[1]; | ||||
| print("---------------------------------------------------------"); | print("---------------------------------------------------------"); | ||||
| print($"Test loss: {loss_test.ToString("0.0000")}, test accuracy: {accuracy_test.ToString("P")}"); | print($"Test loss: {loss_test.ToString("0.0000")}, test accuracy: {accuracy_test.ToString("P")}"); | ||||
| print("---------------------------------------------------------"); | print("---------------------------------------------------------"); | ||||
| } | } | ||||
| private (NDArray, NDArray) randomize(NDArray x, NDArray y) | |||||
| { | |||||
| var perm = np.random.permutation(y.shape[0]); | |||||
| np.random.shuffle(perm); | |||||
| return (mnist.train.data[perm], mnist.train.labels[perm]); | |||||
| } | |||||
| /// <summary> | |||||
| /// selects a few number of images determined by the batch_size variable (if you don't know why, read about Stochastic Gradient Method) | |||||
| /// </summary> | |||||
| /// <param name="x"></param> | |||||
| /// <param name="y"></param> | |||||
| /// <param name="start"></param> | |||||
| /// <param name="end"></param> | |||||
| /// <returns></returns> | |||||
| private (NDArray, NDArray) get_next_batch(NDArray x, NDArray y, int start, int end) | |||||
| { | |||||
| var x_batch = x[$"{start}:{end}"]; | |||||
| var y_batch = y[$"{start}:{end}"]; | |||||
| return (x_batch, y_batch); | |||||
| } | |||||
| } | } | ||||
| } | } | ||||
| @@ -17,10 +17,10 @@ | |||||
| using NumSharp; | using NumSharp; | ||||
| using System; | using System; | ||||
| using Tensorflow; | using Tensorflow; | ||||
| using TensorFlowNET.Examples.Utility; | |||||
| using Tensorflow.Hub; | |||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples.ImageProcess | |||||
| namespace TensorFlowNET.Examples | |||||
| { | { | ||||
| /// <summary> | /// <summary> | ||||
| /// Recurrent Neural Network for handwritten digits MNIST. | /// Recurrent Neural Network for handwritten digits MNIST. | ||||
| @@ -45,7 +45,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| int n_inputs = 28; | int n_inputs = 28; | ||||
| int n_outputs = 10; | int n_outputs = 10; | ||||
| Datasets<DataSetMnist> mnist; | |||||
| Datasets<MnistDataSet> mnist; | |||||
| Tensor x, y; | Tensor x, y; | ||||
| Tensor loss, accuracy, cls_prediction; | Tensor loss, accuracy, cls_prediction; | ||||
| @@ -64,11 +64,11 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| PrepareData(); | PrepareData(); | ||||
| BuildGraph(); | BuildGraph(); | ||||
| with(tf.Session(), sess => | |||||
| using (var sess = tf.Session()) | |||||
| { | { | ||||
| Train(sess); | Train(sess); | ||||
| Test(sess); | Test(sess); | ||||
| }); | |||||
| } | |||||
| return loss_test < 0.09 && accuracy_test > 0.95; | return loss_test < 0.09 && accuracy_test > 0.95; | ||||
| } | } | ||||
| @@ -143,15 +143,15 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| public void PrepareData() | public void PrepareData() | ||||
| { | { | ||||
| mnist = MNIST.read_data_sets("mnist", one_hot: true); | |||||
| (x_train, y_train) = (mnist.train.data, mnist.train.labels); | |||||
| (x_valid, y_valid) = (mnist.validation.data, mnist.validation.labels); | |||||
| (x_test, y_test) = (mnist.test.data, mnist.test.labels); | |||||
| mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true).Result; | |||||
| (x_train, y_train) = (mnist.Train.Data, mnist.Train.Labels); | |||||
| (x_valid, y_valid) = (mnist.Validation.Data, mnist.Validation.Labels); | |||||
| (x_test, y_test) = (mnist.Test.Data, mnist.Test.Labels); | |||||
| print("Size of:"); | print("Size of:"); | ||||
| print($"- Training-set:\t\t{len(mnist.train.data)}"); | |||||
| print($"- Validation-set:\t{len(mnist.validation.data)}"); | |||||
| print($"- Test-set:\t\t{len(mnist.test.data)}"); | |||||
| print($"- Training-set:\t\t{len(mnist.Train.Data)}"); | |||||
| print($"- Validation-set:\t{len(mnist.Validation.Data)}"); | |||||
| print($"- Test-set:\t\t{len(mnist.Test.Data)}"); | |||||
| } | } | ||||
| public Graph ImportGraph() => throw new NotImplementedException(); | public Graph ImportGraph() => throw new NotImplementedException(); | ||||
| @@ -4,7 +4,7 @@ using Tensorflow; | |||||
| using TensorFlowNET.Examples.Utility; | using TensorFlowNET.Examples.Utility; | ||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples.ImageProcess | |||||
| namespace TensorFlowNET.Examples | |||||
| { | { | ||||
| /// <summary> | /// <summary> | ||||
| /// This example removes the background from an input image. | /// This example removes the background from an input image. | ||||
| @@ -32,11 +32,11 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| Tensor output = graph.OperationByName("SemanticPredictions"); | Tensor output = graph.OperationByName("SemanticPredictions"); | ||||
| with(tf.Session(graph), sess => | |||||
| using (var sess = tf.Session(graph)) | |||||
| { | { | ||||
| // Runs inference on a single image. | // Runs inference on a single image. | ||||
| sess.run(output, new FeedItem(output, "[np.asarray(resized_image)]")); | sess.run(output, new FeedItem(output, "[np.asarray(resized_image)]")); | ||||
| }); | |||||
| } | |||||
| return false; | return false; | ||||
| } | } | ||||
| @@ -45,7 +45,7 @@ namespace TensorFlowNET.Examples | |||||
| var result_labels = new List<string>(); | var result_labels = new List<string>(); | ||||
| var sw = new Stopwatch(); | var sw = new Stopwatch(); | ||||
| with(tf.Session(graph), sess => | |||||
| using (var sess = tf.Session(graph)) | |||||
| { | { | ||||
| foreach (var nd in file_ndarrays) | foreach (var nd in file_ndarrays) | ||||
| { | { | ||||
| @@ -58,7 +58,7 @@ namespace TensorFlowNET.Examples | |||||
| Console.WriteLine($"{labels[idx]} {results[idx]} in {sw.ElapsedMilliseconds}ms", Color.Tan); | Console.WriteLine($"{labels[idx]} {results[idx]} in {sw.ElapsedMilliseconds}ms", Color.Tan); | ||||
| result_labels.Add(labels[idx]); | result_labels.Add(labels[idx]); | ||||
| } | } | ||||
| }); | |||||
| } | |||||
| return result_labels.Contains("military uniform"); | return result_labels.Contains("military uniform"); | ||||
| } | } | ||||
| @@ -69,19 +69,19 @@ namespace TensorFlowNET.Examples | |||||
| int input_mean = 117, | int input_mean = 117, | ||||
| int input_std = 1) | int input_std = 1) | ||||
| { | { | ||||
| return with(tf.Graph().as_default(), graph => | |||||
| { | |||||
| var file_reader = tf.read_file(file_name, "file_reader"); | |||||
| var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); | |||||
| var cast = tf.cast(decodeJpeg, tf.float32); | |||||
| var dims_expander = tf.expand_dims(cast, 0); | |||||
| var resize = tf.constant(new int[] { input_height, input_width }); | |||||
| var bilinear = tf.image.resize_bilinear(dims_expander, resize); | |||||
| var sub = tf.subtract(bilinear, new float[] { input_mean }); | |||||
| var normalized = tf.divide(sub, new float[] { input_std }); | |||||
| return with(tf.Session(graph), sess => sess.run(normalized)); | |||||
| }); | |||||
| var graph = tf.Graph().as_default(); | |||||
| var file_reader = tf.read_file(file_name, "file_reader"); | |||||
| var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); | |||||
| var cast = tf.cast(decodeJpeg, tf.float32); | |||||
| var dims_expander = tf.expand_dims(cast, 0); | |||||
| var resize = tf.constant(new int[] { input_height, input_width }); | |||||
| var bilinear = tf.image.resize_bilinear(dims_expander, resize); | |||||
| var sub = tf.subtract(bilinear, new float[] { input_mean }); | |||||
| var normalized = tf.divide(sub, new float[] { input_std }); | |||||
| using (var sess = tf.Session(graph)) | |||||
| return sess.run(normalized); | |||||
| } | } | ||||
| public void PrepareData() | public void PrepareData() | ||||
| @@ -45,9 +45,12 @@ namespace TensorFlowNET.Examples | |||||
| var input_operation = graph.get_operation_by_name(input_name); | var input_operation = graph.get_operation_by_name(input_name); | ||||
| var output_operation = graph.get_operation_by_name(output_name); | var output_operation = graph.get_operation_by_name(output_name); | ||||
| var results = with(tf.Session(graph), | |||||
| sess => sess.run(output_operation.outputs[0], | |||||
| new FeedItem(input_operation.outputs[0], nd))); | |||||
| NDArray results; | |||||
| using (var sess = tf.Session(graph)) | |||||
| { | |||||
| results = sess.run(output_operation.outputs[0], | |||||
| new FeedItem(input_operation.outputs[0], nd)); | |||||
| } | |||||
| results = np.squeeze(results); | results = np.squeeze(results); | ||||
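One subtlety of dropping the lambda wrapper: `with(tf.Session(graph), sess => sess.run(...))` returned the run result directly, while a `using` block cannot, so the result variable has to be hoisted out, as the hunk above does. A sketch with explanatory comments (`graph`, `input_operation`, `output_operation`, and `nd` as in the surrounding example):

```csharp
// The lambda form could return the run result directly:
//   var results = with(tf.Session(graph), sess => sess.run(...));
// A using block cannot, so `results` is declared outside it.
NDArray results;
using (var sess = tf.Session(graph))
{
    results = sess.run(output_operation.outputs[0],
        new FeedItem(input_operation.outputs[0], nd));
}

// Drop the leading batch dimension of size 1.
results = np.squeeze(results);
```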
| @@ -69,19 +72,19 @@ namespace TensorFlowNET.Examples | |||||
| int input_mean = 0, | int input_mean = 0, | ||||
| int input_std = 255) | int input_std = 255) | ||||
| { | { | ||||
| return with(tf.Graph().as_default(), graph => | |||||
| { | |||||
| var file_reader = tf.read_file(file_name, "file_reader"); | |||||
| var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); | |||||
| var caster = tf.cast(image_reader, tf.float32); | |||||
| var dims_expander = tf.expand_dims(caster, 0); | |||||
| var resize = tf.constant(new int[] { input_height, input_width }); | |||||
| var bilinear = tf.image.resize_bilinear(dims_expander, resize); | |||||
| var sub = tf.subtract(bilinear, new float[] { input_mean }); | |||||
| var normalized = tf.divide(sub, new float[] { input_std }); | |||||
| return with(tf.Session(graph), sess => sess.run(normalized)); | |||||
| }); | |||||
| var graph = tf.Graph().as_default(); | |||||
| var file_reader = tf.read_file(file_name, "file_reader"); | |||||
| var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); | |||||
| var caster = tf.cast(image_reader, tf.float32); | |||||
| var dims_expander = tf.expand_dims(caster, 0); | |||||
| var resize = tf.constant(new int[] { input_height, input_width }); | |||||
| var bilinear = tf.image.resize_bilinear(dims_expander, resize); | |||||
| var sub = tf.subtract(bilinear, new float[] { input_mean }); | |||||
| var normalized = tf.divide(sub, new float[] { input_std }); | |||||
| using (var sess = tf.Session(graph)) | |||||
| return sess.run(normalized); | |||||
| } | } | ||||
| public void PrepareData() | public void PrepareData() | ||||
| @@ -51,7 +51,8 @@ namespace TensorFlowNET.Examples | |||||
| var graph = IsImportingGraph ? ImportGraph() : BuildGraph(); | var graph = IsImportingGraph ? ImportGraph() : BuildGraph(); | ||||
| with(tf.Session(graph), sess => Predict(sess)); | |||||
| using (var sess = tf.Session(graph)) | |||||
| Predict(sess); | |||||
| return true; | return true; | ||||
| } | } | ||||
| @@ -101,14 +102,15 @@ namespace TensorFlowNET.Examples | |||||
| private NDArray ReadTensorFromImageFile(string file_name) | private NDArray ReadTensorFromImageFile(string file_name) | ||||
| { | { | ||||
| return with(tf.Graph().as_default(), graph => | |||||
| { | |||||
| var file_reader = tf.read_file(file_name, "file_reader"); | |||||
| var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); | |||||
| var casted = tf.cast(decodeJpeg, TF_DataType.TF_UINT8); | |||||
| var dims_expander = tf.expand_dims(casted, 0); | |||||
| return with(tf.Session(graph), sess => sess.run(dims_expander)); | |||||
| }); | |||||
| var graph = tf.Graph().as_default(); | |||||
| var file_reader = tf.read_file(file_name, "file_reader"); | |||||
| var decodeJpeg = tf.image.decode_jpeg(file_reader, channels: 3, name: "DecodeJpeg"); | |||||
| var casted = tf.cast(decodeJpeg, TF_DataType.TF_UINT8); | |||||
| var dims_expander = tf.expand_dims(casted, 0); | |||||
| using (var sess = tf.Session(graph)) | |||||
| return sess.run(dims_expander); | |||||
| } | } | ||||
| private void buildOutputImage(NDArray[] resultArr) | private void buildOutputImage(NDArray[] resultArr) | ||||
| @@ -25,7 +25,7 @@ using Tensorflow; | |||||
| using TensorFlowNET.Examples.Utility; | using TensorFlowNET.Examples.Utility; | ||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples.ImageProcess | |||||
| namespace TensorFlowNET.Examples | |||||
| { | { | ||||
| /// <summary> | /// <summary> | ||||
| /// In this tutorial, we will reuse the feature extraction capabilities from powerful image classifiers trained on ImageNet | /// In this tutorial, we will reuse the feature extraction capabilities from powerful image classifiers trained on ImageNet | ||||
| @@ -83,19 +83,19 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| #region For debug purpose | #region For debug purpose | ||||
| // predict images | // predict images | ||||
| Predict(null); | |||||
| // Predict(null); | |||||
| // load saved pb and test new images. | // load saved pb and test new images. | ||||
| Test(null); | |||||
| // Test(null); | |||||
| #endregion | #endregion | ||||
| var graph = IsImportingGraph ? ImportGraph() : BuildGraph(); | var graph = IsImportingGraph ? ImportGraph() : BuildGraph(); | ||||
| with(tf.Session(graph), sess => | |||||
| using (var sess = tf.Session(graph)) | |||||
| { | { | ||||
| Train(sess); | Train(sess); | ||||
| }); | |||||
| } | |||||
| return test_accuracy > 0.75f; | return test_accuracy > 0.75f; | ||||
| } | } | ||||
| @@ -141,20 +141,18 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| Tensor evaluation_step = null; | Tensor evaluation_step = null; | ||||
| Tensor prediction = null; | Tensor prediction = null; | ||||
| with(eval_graph.as_default(), graph => | |||||
| { | |||||
| // Add the new layer for exporting. | |||||
| var (_, _, bottleneck_input, ground_truth_input, final_tensor) = | |||||
| add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor, | |||||
| wants_quantization, is_training: false); | |||||
| var graph = eval_graph.as_default(); | |||||
| // Add the new layer for exporting. | |||||
| var (_, _, bottleneck_input, ground_truth_input, final_tensor) = | |||||
| add_final_retrain_ops(class_count, final_tensor_name, bottleneck_tensor, | |||||
| wants_quantization, is_training: false); | |||||
| // Now we need to restore the values from the training graph to the eval | |||||
| // graph. | |||||
| tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME); | |||||
| // Now we need to restore the values from the training graph to the eval | |||||
| // graph. | |||||
| tf.train.Saver().restore(eval_sess, CHECKPOINT_NAME); | |||||
| (evaluation_step, prediction) = add_evaluation_step(final_tensor, | |||||
| ground_truth_input); | |||||
| }); | |||||
| (evaluation_step, prediction) = add_evaluation_step(final_tensor, | |||||
| ground_truth_input); | |||||
| return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input, | return (eval_sess, resized_input_tensor, bottleneck_input, ground_truth_input, | ||||
| evaluation_step, prediction); | evaluation_step, prediction); | ||||
| @@ -180,7 +178,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| Tensor bottleneck_tensor, bool quantize_layer, bool is_training) | Tensor bottleneck_tensor, bool quantize_layer, bool is_training) | ||||
| { | { | ||||
| var (batch_size, bottleneck_tensor_size) = (bottleneck_tensor.TensorShape.Dimensions[0], bottleneck_tensor.TensorShape.Dimensions[1]); | var (batch_size, bottleneck_tensor_size) = (bottleneck_tensor.TensorShape.Dimensions[0], bottleneck_tensor.TensorShape.Dimensions[1]); | ||||
| with(tf.name_scope("input"), scope => | |||||
| tf_with(tf.name_scope("input"), scope => | |||||
| { | { | ||||
| bottleneck_input = tf.placeholder_with_default( | bottleneck_input = tf.placeholder_with_default( | ||||
| bottleneck_tensor, | bottleneck_tensor, | ||||
| @@ -193,10 +191,10 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| // Organizing the following ops so they are easier to see in TensorBoard. | // Organizing the following ops so they are easier to see in TensorBoard. | ||||
| string layer_name = "final_retrain_ops"; | string layer_name = "final_retrain_ops"; | ||||
| Tensor logits = null; | Tensor logits = null; | ||||
| with(tf.name_scope(layer_name), scope => | |||||
| tf_with(tf.name_scope(layer_name), scope => | |||||
| { | { | ||||
| RefVariable layer_weights = null; | RefVariable layer_weights = null; | ||||
| with(tf.name_scope("weights"), delegate | |||||
| tf_with(tf.name_scope("weights"), delegate | |||||
| { | { | ||||
| var initial_value = tf.truncated_normal(new int[] { bottleneck_tensor_size, class_count }, stddev: 0.001f); | var initial_value = tf.truncated_normal(new int[] { bottleneck_tensor_size, class_count }, stddev: 0.001f); | ||||
| layer_weights = tf.Variable(initial_value, name: "final_weights"); | layer_weights = tf.Variable(initial_value, name: "final_weights"); | ||||
| @@ -204,13 +202,13 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| }); | }); | ||||
| RefVariable layer_biases = null; | RefVariable layer_biases = null; | ||||
| with(tf.name_scope("biases"), delegate | |||||
| tf_with(tf.name_scope("biases"), delegate | |||||
| { | { | ||||
| layer_biases = tf.Variable(tf.zeros(class_count), name: "final_biases"); | layer_biases = tf.Variable(tf.zeros(class_count), name: "final_biases"); | ||||
| variable_summaries(layer_biases); | variable_summaries(layer_biases); | ||||
| }); | }); | ||||
| with(tf.name_scope("Wx_plus_b"), delegate | |||||
| tf_with(tf.name_scope("Wx_plus_b"), delegate | |||||
| { | { | ||||
| logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases; | logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases; | ||||
| tf.summary.histogram("pre_activations", logits); | tf.summary.histogram("pre_activations", logits); | ||||
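Taken together, these name scopes build the single fully connected layer that retraining bolts onto the bottleneck tensor: `logits = bottleneck_input * W + b`. A condensed sketch of the layer construction; `bottleneck_input`, `bottleneck_tensor_size`, `class_count`, and `final_tensor_name` come from the surrounding method, while the closing `tf.nn.softmax` line is an assumption based on the upstream retrain script and is not visible in these hunks:

```csharp
// Final retrain layer over the bottleneck features:
// logits = bottleneck_input * W + b
RefVariable layer_weights = null, layer_biases = null;
Tensor logits = null;

tf_with(tf.name_scope("final_retrain_ops"), scope =>
{
    tf_with(tf.name_scope("weights"), delegate
    {
        var initial_value = tf.truncated_normal(
            new int[] { bottleneck_tensor_size, class_count }, stddev: 0.001f);
        layer_weights = tf.Variable(initial_value, name: "final_weights");
        variable_summaries(layer_weights);
    });

    tf_with(tf.name_scope("biases"), delegate
    {
        layer_biases = tf.Variable(tf.zeros(class_count), name: "final_biases");
        variable_summaries(layer_biases);
    });

    tf_with(tf.name_scope("Wx_plus_b"), delegate
    {
        logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases;
        tf.summary.histogram("pre_activations", logits);
    });
});

// Assumption (not visible in these hunks): softmax turns the logits
// into the exported final_tensor, as in the upstream retrain script.
var final_tensor = tf.nn.softmax(logits, name: final_tensor_name);
```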
| @@ -239,7 +237,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| return (null, null, bottleneck_input, ground_truth_input, final_tensor); | return (null, null, bottleneck_input, ground_truth_input, final_tensor); | ||||
| Tensor cross_entropy_mean = null; | Tensor cross_entropy_mean = null; | ||||
| with(tf.name_scope("cross_entropy"), delegate | |||||
| tf_with(tf.name_scope("cross_entropy"), delegate | |||||
| { | { | ||||
| cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy( | cross_entropy_mean = tf.losses.sparse_softmax_cross_entropy( | ||||
| labels: ground_truth_input, logits: logits); | labels: ground_truth_input, logits: logits); | ||||
| @@ -247,7 +245,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| tf.summary.scalar("cross_entropy", cross_entropy_mean); | tf.summary.scalar("cross_entropy", cross_entropy_mean); | ||||
| with(tf.name_scope("train"), delegate | |||||
| tf_with(tf.name_scope("train"), delegate | |||||
| { | { | ||||
| var optimizer = tf.train.GradientDescentOptimizer(learning_rate); | var optimizer = tf.train.GradientDescentOptimizer(learning_rate); | ||||
| train_step = optimizer.minimize(cross_entropy_mean); | train_step = optimizer.minimize(cross_entropy_mean); | ||||
| @@ -259,12 +257,12 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| private void variable_summaries(RefVariable var) | private void variable_summaries(RefVariable var) | ||||
| { | { | ||||
| with(tf.name_scope("summaries"), delegate | |||||
| tf_with(tf.name_scope("summaries"), delegate | |||||
| { | { | ||||
| var mean = tf.reduce_mean(var); | var mean = tf.reduce_mean(var); | ||||
| tf.summary.scalar("mean", mean); | tf.summary.scalar("mean", mean); | ||||
| Tensor stddev = null; | Tensor stddev = null; | ||||
| with(tf.name_scope("stddev"), delegate | |||||
| tf_with(tf.name_scope("stddev"), delegate | |||||
| { | { | ||||
| stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))); | stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean))); | ||||
| }); | }); | ||||
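`variable_summaries` is the same nested-scope pattern in miniature: an outer `summaries` scope with an inner `stddev` scope. For context, a sketch of the whole helper; the summary calls after the inner scope are assumed from the standard retrain script and are cut off by the hunk boundary above:

```csharp
private void variable_summaries(RefVariable var)
{
    tf_with(tf.name_scope("summaries"), delegate
    {
        var mean = tf.reduce_mean(var);
        tf.summary.scalar("mean", mean);

        Tensor stddev = null;
        tf_with(tf.name_scope("stddev"), delegate
        {
            // Root-mean-square deviation from the mean computed above.
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)));
        });

        // Assumed from the upstream retrain script (cut off by the hunk):
        tf.summary.scalar("stddev", stddev);
        tf.summary.histogram("histogram", var);
    });
}
```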
| @@ -279,7 +277,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| { | { | ||||
| var (height, width) = (299, 299); | var (height, width) = (299, 299); | ||||
| return with(tf.Graph().as_default(), graph => | |||||
| return tf_with(tf.Graph().as_default(), graph => | |||||
| { | { | ||||
| tf.train.import_meta_graph("graph/InceptionV3.meta"); | tf.train.import_meta_graph("graph/InceptionV3.meta"); | ||||
| Tensor resized_input_tensor = graph.OperationByName("Placeholder"); //tf.placeholder(tf.float32, new TensorShape(-1, height, width, 3)); | Tensor resized_input_tensor = graph.OperationByName("Placeholder"); //tf.placeholder(tf.float32, new TensorShape(-1, height, width, 3)); | ||||
| @@ -350,15 +348,15 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| { | { | ||||
| Tensor evaluation_step = null, correct_prediction = null, prediction = null; | Tensor evaluation_step = null, correct_prediction = null, prediction = null; | ||||
| with(tf.name_scope("accuracy"), scope => | |||||
| tf_with(tf.name_scope("accuracy"), scope => | |||||
| { | { | ||||
| with(tf.name_scope("correct_prediction"), delegate | |||||
| tf_with(tf.name_scope("correct_prediction"), delegate | |||||
| { | { | ||||
| prediction = tf.argmax(result_tensor, 1); | prediction = tf.argmax(result_tensor, 1); | ||||
| correct_prediction = tf.equal(prediction, ground_truth_tensor); | correct_prediction = tf.equal(prediction, ground_truth_tensor); | ||||
| }); | }); | ||||
| with(tf.name_scope("accuracy"), delegate | |||||
| tf_with(tf.name_scope("accuracy"), delegate | |||||
| { | { | ||||
| evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)); | evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)); | ||||
| }); | }); | ||||
| @@ -596,7 +594,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| create_module_graph(); | create_module_graph(); | ||||
| // Add the new layer that we'll be training. | // Add the new layer that we'll be training. | ||||
| with(graph.as_default(), delegate | |||||
| tf_with(graph.as_default(), delegate | |||||
| { | { | ||||
| (train_step, cross_entropy, bottleneck_input, | (train_step, cross_entropy, bottleneck_input, | ||||
| ground_truth_input, final_tensor) = add_final_retrain_ops( | ground_truth_input, final_tensor) = add_final_retrain_ops( | ||||
| @@ -745,13 +743,13 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| Tensor input = graph.OperationByName("Placeholder"); | Tensor input = graph.OperationByName("Placeholder"); | ||||
| Tensor output = graph.OperationByName("final_result"); | Tensor output = graph.OperationByName("final_result"); | ||||
| with(tf.Session(graph), sess => | |||||
| using (var sess = tf.Session(graph)) | |||||
| { | { | ||||
| var result = sess.run(output, new FeedItem(input, fileBytes)); | var result = sess.run(output, new FeedItem(input, fileBytes)); | ||||
| var prob = np.squeeze(result); | var prob = np.squeeze(result); | ||||
| var idx = np.argmax(prob); | var idx = np.argmax(prob); | ||||
| print($"Prediction result: [{labels[idx]} {prob[idx][0]}] for {img_path}."); | print($"Prediction result: [{labels[idx]} {prob[idx][0]}] for {img_path}."); | ||||
| }); | |||||
| } | |||||
| } | } | ||||
| private NDArray ReadTensorFromImageFile(string file_name, | private NDArray ReadTensorFromImageFile(string file_name, | ||||
| @@ -760,19 +758,19 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| int input_mean = 0, | int input_mean = 0, | ||||
| int input_std = 255) | int input_std = 255) | ||||
| { | { | ||||
| return with(tf.Graph().as_default(), graph => | |||||
| { | |||||
| var file_reader = tf.read_file(file_name, "file_reader"); | |||||
| var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); | |||||
| var caster = tf.cast(image_reader, tf.float32); | |||||
| var dims_expander = tf.expand_dims(caster, 0); | |||||
| var resize = tf.constant(new int[] { input_height, input_width }); | |||||
| var bilinear = tf.image.resize_bilinear(dims_expander, resize); | |||||
| var sub = tf.subtract(bilinear, new float[] { input_mean }); | |||||
| var normalized = tf.divide(sub, new float[] { input_std }); | |||||
| return with(tf.Session(graph), sess => sess.run(normalized)); | |||||
| }); | |||||
| var graph = tf.Graph().as_default(); | |||||
| var file_reader = tf.read_file(file_name, "file_reader"); | |||||
| var image_reader = tf.image.decode_jpeg(file_reader, channels: 3, name: "jpeg_reader"); | |||||
| var caster = tf.cast(image_reader, tf.float32); | |||||
| var dims_expander = tf.expand_dims(caster, 0); | |||||
| var resize = tf.constant(new int[] { input_height, input_width }); | |||||
| var bilinear = tf.image.resize_bilinear(dims_expander, resize); | |||||
| var sub = tf.subtract(bilinear, new float[] { input_mean }); | |||||
| var normalized = tf.divide(sub, new float[] { input_std }); | |||||
| using (var sess = tf.Session(graph)) | |||||
| return sess.run(normalized); | |||||
| } | } | ||||
| public void Test(Session sess_) | public void Test(Session sess_) | ||||
| @@ -783,7 +781,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| var graph = Graph.ImportFromPB(output_graph); | var graph = Graph.ImportFromPB(output_graph); | ||||
| var (jpeg_data_tensor, decoded_image_tensor) = add_jpeg_decoding(); | var (jpeg_data_tensor, decoded_image_tensor) = add_jpeg_decoding(); | ||||
| with(tf.Session(graph), sess => | |||||
| tf_with(tf.Session(graph), sess => | |||||
| { | { | ||||
| (test_accuracy, predictions) = run_final_eval(sess, null, class_count, image_lists, | (test_accuracy, predictions) = run_final_eval(sess, null, class_count, image_lists, | ||||
| jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, | jpeg_data_tensor, decoded_image_tensor, resized_image_tensor, | ||||
| @@ -0,0 +1,24 @@ | |||||
| <Project Sdk="Microsoft.NET.Sdk"> | |||||
| <PropertyGroup> | |||||
| <OutputType>Exe</OutputType> | |||||
| <TargetFramework>netcoreapp2.2</TargetFramework> | |||||
| <GeneratePackageOnBuild>false</GeneratePackageOnBuild> | |||||
| </PropertyGroup> | |||||
| <ItemGroup> | |||||
| <PackageReference Include="Colorful.Console" Version="1.2.9" /> | |||||
| <PackageReference Include="Newtonsoft.Json" Version="12.0.2" /> | |||||
| <PackageReference Include="SciSharp.TensorFlow.Redist-Windows-GPU" Version="1.14.0" /> | |||||
| <PackageReference Include="SharpZipLib" Version="1.1.0" /> | |||||
| <PackageReference Include="System.Drawing.Common" Version="4.5.1" /> | |||||
| </ItemGroup> | |||||
| <ItemGroup> | |||||
| <ProjectReference Include="..\..\src\KerasNET.Core\Keras.Core.csproj" /> | |||||
| <ProjectReference Include="..\..\src\TensorFlowDatasets\TensorFlowDatasets.csproj" /> | |||||
| <ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" /> | |||||
| <ProjectReference Include="..\..\src\TensorFlowText\TensorFlowText.csproj" /> | |||||
| <ProjectReference Include="..\..\src\TensorFlowHub\TensorFlowHub.csproj" /> | |||||
| </ItemGroup> | |||||
| </Project> | |||||
| @@ -16,7 +16,9 @@ | |||||
| <ItemGroup> | <ItemGroup> | ||||
| <ProjectReference Include="..\..\src\KerasNET.Core\Keras.Core.csproj" /> | <ProjectReference Include="..\..\src\KerasNET.Core\Keras.Core.csproj" /> | ||||
| <ProjectReference Include="..\..\src\TensorFlowDatasets\TensorFlowDatasets.csproj" /> | |||||
| <ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" /> | <ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" /> | ||||
| <ProjectReference Include="..\..\src\TensorFlowText\TensorFlowText.csproj" /> | <ProjectReference Include="..\..\src\TensorFlowText\TensorFlowText.csproj" /> | ||||
| <ProjectReference Include="..\..\src\TensorFlowHub\TensorFlowHub.csproj" /> | |||||
| </ItemGroup> | </ItemGroup> | ||||
| </Project> | </Project> | ||||
| @@ -64,7 +64,9 @@ namespace TensorFlowNET.Examples | |||||
| { | { | ||||
| PrepareData(); | PrepareData(); | ||||
| var graph = IsImportingGraph ? ImportGraph() : BuildGraph(); | var graph = IsImportingGraph ? ImportGraph() : BuildGraph(); | ||||
| with(tf.Session(graph), sess => Train(sess)); | |||||
| using (var sess = tf.Session(graph)) | |||||
| Train(sess); | |||||
| return max_accuracy > 0.9; | return max_accuracy > 0.9; | ||||
| } | } | ||||
| @@ -63,7 +63,7 @@ namespace TensorFlowNET.Examples.Text.NER | |||||
| var init = tf.global_variables_initializer(); | var init = tf.global_variables_initializer(); | ||||
| with(tf.Session(), sess => | |||||
| using (var sess = tf.Session()) | |||||
| { | { | ||||
| sess.run(init); | sess.run(init); | ||||
| @@ -73,7 +73,7 @@ namespace TensorFlowNET.Examples.Text.NER | |||||
| loss_value = run_epoch(sess, train, dev, epoch); | loss_value = run_epoch(sess, train, dev, epoch); | ||||
| print($"train loss: {loss_value}"); | print($"train loss: {loss_value}"); | ||||
| } | } | ||||
| }); | |||||
| } | |||||
| return loss_value < 0.1; | return loss_value < 0.1; | ||||
| } | } | ||||