| @@ -332,3 +332,7 @@ src/TensorFlowNET.Native/bazel-* | |||||
| src/TensorFlowNET.Native/c_api.h | src/TensorFlowNET.Native/c_api.h | ||||
| /.vscode | /.vscode | ||||
| test/TensorFlowNET.Examples/mnist | test/TensorFlowNET.Examples/mnist | ||||
| # training model resources | |||||
| .resources | |||||
| @@ -28,8 +28,14 @@ In comparison to other projects, like for instance TensorFlowSharp which only pr | |||||
| Install TF.NET and TensorFlow binary through NuGet. | Install TF.NET and TensorFlow binary through NuGet. | ||||
| ```sh | ```sh | ||||
| ### install tensorflow C# binding | |||||
| PM> Install-Package TensorFlow.NET | PM> Install-Package TensorFlow.NET | ||||
| ### Install tensorflow binary | |||||
| ### For CPU version | |||||
| PM> Install-Package SciSharp.TensorFlow.Redist | PM> Install-Package SciSharp.TensorFlow.Redist | ||||
| ### For GPU version (CUDA and cuDNN are required) | |||||
| PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU | |||||
| ``` | ``` | ||||
| Import TF.NET. | Import TF.NET. | ||||
| @@ -17,7 +17,11 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowBenchmark", "src\ | |||||
| EndProject | EndProject | ||||
| Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowHub", "src\TensorFlowHub\TensorFlowHub.csproj", "{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}" | Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowHub", "src\TensorFlowHub\TensorFlowHub.csproj", "{8FD59A5A-97EB-457E-B9F1-D88B0C822C6E}" | ||||
| EndProject | EndProject | ||||
| Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowText", "src\TensorFlowText\TensorFlowText.csproj", "{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}" | |||||
| Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowText", "src\TensorFlowText\TensorFlowText.csproj", "{B598E5D5-BD2D-4191-8532-F2FBAC31AB81}" | |||||
| EndProject | |||||
| Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TensorFlowDatasets", "src\TensorFlowDatasets\TensorFlowDatasets.csproj", "{DF151A51-E9FD-41BD-B0F4-08A743755D44}" | |||||
| EndProject | |||||
| Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "TensorFlowNET.Examples.GPU", "test\TensorFlowNET.Examples\TensorFlowNET.Examples.GPU.csproj", "{6F6B3382-8F87-4CD9-BF87-C81D5405685A}" | |||||
| EndProject | EndProject | ||||
| Global | Global | ||||
| GlobalSection(SolutionConfigurationPlatforms) = preSolution | GlobalSection(SolutionConfigurationPlatforms) = preSolution | ||||
| @@ -57,6 +61,14 @@ Global | |||||
| {B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Debug|Any CPU.Build.0 = Debug|Any CPU | {B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Debug|Any CPU.Build.0 = Debug|Any CPU | ||||
| {B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Release|Any CPU.ActiveCfg = Release|Any CPU | {B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Release|Any CPU.ActiveCfg = Release|Any CPU | ||||
| {B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Release|Any CPU.Build.0 = Release|Any CPU | {B598E5D5-BD2D-4191-8532-F2FBAC31AB81}.Release|Any CPU.Build.0 = Release|Any CPU | ||||
| {DF151A51-E9FD-41BD-B0F4-08A743755D44}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||||
| {DF151A51-E9FD-41BD-B0F4-08A743755D44}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||||
| {DF151A51-E9FD-41BD-B0F4-08A743755D44}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||||
| {DF151A51-E9FD-41BD-B0F4-08A743755D44}.Release|Any CPU.Build.0 = Release|Any CPU | |||||
| {6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Debug|Any CPU.ActiveCfg = Debug|Any CPU | |||||
| {6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Debug|Any CPU.Build.0 = Debug|Any CPU | |||||
| {6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||||
| {6F6B3382-8F87-4CD9-BF87-C81D5405685A}.Release|Any CPU.Build.0 = Release|Any CPU | |||||
| EndGlobalSection | EndGlobalSection | ||||
| GlobalSection(SolutionProperties) = preSolution | GlobalSection(SolutionProperties) = preSolution | ||||
| HideSolutionNode = FALSE | HideSolutionNode = FALSE | ||||
| @@ -1,8 +1,14 @@ | |||||
| ## SciSharp.TensorFlow.Redist ## | ## SciSharp.TensorFlow.Redist ## | ||||
| `SciSharp.TensorFlow.Redist` is a migration from [Microsoft.ML.TensorFlow.Redist](https://github.com/dotnet/machinelearning/tree/release/1.2/src/Redist/Microsoft.ML.TensorFlow.Redist). [ML.NET](https://github.com/dotnet/machinelearning) team will not maintain the package since [ML.NET](https://www.nuget.org/packages/Microsoft.ML) v1.4.0 going forward. | |||||
| `SciSharp.TensorFlow.Redist` is a migration from [Microsoft.ML.TensorFlow.Redist](https://github.com/dotnet/machinelearning/tree/release/1.2/src/Redist/Microsoft.ML.TensorFlow.Redist). [ML.NET](https://github.com/dotnet/machinelearning) team will not maintain the package since [ML.NET](https://www.nuget.org/packages/Microsoft.ML) v1.3.0 going forward. | |||||
| * CPU version for all platforms (Windows, Linux, OSX) | |||||
| ```powershell | |||||
| PM> Install-Package SciSharp.TensorFlow.Redist | |||||
| ``` | |||||
| * GPU version for Windows | |||||
| ```powershell | ```powershell | ||||
| PM> Install-Package SciSharp.TensorFlow.Redist | PM> Install-Package SciSharp.TensorFlow.Redist | ||||
| ``` | ``` | ||||
| @@ -16,7 +22,7 @@ Related merged [commits](https://github.com/SciSharp/TensorFlow.NET/commit/854a5 | |||||
| On Windows, the tar command does not support extracting archives with symlinks. So when `dotnet pack` runs on Windows it will only package the Windows binaries. | On Windows, the tar command does not support extracting archives with symlinks. So when `dotnet pack` runs on Windows it will only package the Windows binaries. | ||||
| 1. Run `dotnet pack` under `src/SciSharp.TensorFlow.Redist` directory in Linux. | 1. Run `dotnet pack` under `src/SciSharp.TensorFlow.Redist` directory in Linux. | ||||
| 2. Run `nuget push SciSharp.TensorFlow.Redist.1.14.0.nupkg -k APIKEY -s https://api.nuget.org/v3/index.json` | |||||
| 2. Run `dotnet nuget push SciSharp.TensorFlow.Redist.1.14.0.nupkg -k APIKEY -s https://api.nuget.org/v3/index.json` | |||||
| @@ -9,7 +9,7 @@ | |||||
| <license type="file">LICENSE.txt</license> | <license type="file">LICENSE.txt</license> | ||||
| <licenseUrl>https://aka.ms/deprecateLicenseUrl</licenseUrl> | <licenseUrl>https://aka.ms/deprecateLicenseUrl</licenseUrl> | ||||
| <projectUrl>https://www.tensorflow.org/</projectUrl> | <projectUrl>https://www.tensorflow.org/</projectUrl> | ||||
| <description>$packageId$ contains the TensorFlow C library version $version$ redistributed as a NuGet package.</description> | |||||
| <description>$packageId$ contains the TensorFlow C library CPU version $version$ redistributed as a NuGet package.</description> | |||||
| <releaseNotes>https://github.com/tensorflow/tensorflow/releases/tag/v$version$</releaseNotes> | <releaseNotes>https://github.com/tensorflow/tensorflow/releases/tag/v$version$</releaseNotes> | ||||
| <copyright>Copyright 2019 The TensorFlow Authors. All rights reserved.</copyright> | <copyright>Copyright 2019 The TensorFlow Authors. All rights reserved.</copyright> | ||||
| <tags>TensorFlow</tags> | <tags>TensorFlow</tags> | ||||
| @@ -0,0 +1,26 @@ | |||||
| <?xml version="1.0" encoding="utf-8"?> | |||||
| <package xmlns="http://schemas.microsoft.com/packaging/2012/06/nuspec.xsd"> | |||||
| <metadata> | |||||
| <id>$packageId$</id> | |||||
| <version>$version$</version> | |||||
| <authors>The TensorFlow Authors</authors> | |||||
| <owners>The TensorFlow Authors</owners> | |||||
| <requireLicenseAcceptance>true</requireLicenseAcceptance> | |||||
| <license type="file">LICENSE.txt</license> | |||||
| <licenseUrl>https://aka.ms/deprecateLicenseUrl</licenseUrl> | |||||
| <projectUrl>https://www.tensorflow.org/</projectUrl> | |||||
| <description>$packageId$ contains the TensorFlow C library GPU version $version$ redistributed as a NuGet package.</description> | |||||
| <releaseNotes>https://github.com/tensorflow/tensorflow/releases/tag/v$version$</releaseNotes> | |||||
| <copyright>Copyright 2019 The TensorFlow Authors. All rights reserved.</copyright> | |||||
| <tags>TensorFlow</tags> | |||||
| <dependencies> | |||||
| <group targetFramework=".NETStandard2.0" /> | |||||
| </dependencies> | |||||
| </metadata> | |||||
| <files> | |||||
| <file src="CommonPackage.props" target="build\netstandard2.0\$packageId$.props" /> | |||||
| <file src="bin\packages\$packageId$\LICENSE.txt" target="LICENSE.txt" /> | |||||
| <file src="bin\packages\$packageId$\THIRD_PARTY_NOTICES.txt" target="THIRD_PARTY_NOTICES.txt" /> | |||||
| <file src="bin\packages\$packageId$\runtimes\**\*" target="runtimes" /> | |||||
| </files> | |||||
| </package> | |||||
| @@ -17,7 +17,7 @@ | |||||
| <NoBuild>true</NoBuild> | <NoBuild>true</NoBuild> | ||||
| <IncludeBuildOutput>false</IncludeBuildOutput> | <IncludeBuildOutput>false</IncludeBuildOutput> | ||||
| <NuspecFile>Redist.nuspec</NuspecFile> | |||||
| <NuspecFile>Redist-CPU.nuspec</NuspecFile> | |||||
| <NuspecProperties>packageId=$(PackageId);version=$(PackageVersion)</NuspecProperties> | <NuspecProperties>packageId=$(PackageId);version=$(PackageVersion)</NuspecProperties> | ||||
| <NuspecBasePath>$(ProjDir)</NuspecBasePath> | <NuspecBasePath>$(ProjDir)</NuspecBasePath> | ||||
| @@ -0,0 +1,187 @@ | |||||
| <Project Sdk="Microsoft.NET.Sdk"> | |||||
| <PropertyGroup> | |||||
| <ProjDir>$(MSBuildThisFileDirectory)</ProjDir> | |||||
| <BinDir>$(ProjDir)bin\</BinDir> | |||||
| <ObjDir>$(ProjDir)obj\</ObjDir> | |||||
| <TargetArchitecture Condition="'$(TargetArchitecture)' == ''">x64</TargetArchitecture> | |||||
| <TargetFramework>netstandard2.0</TargetFramework> | |||||
| <TensorFlowVersion>1.14.0</TensorFlowVersion> | |||||
| <TensorFlowMajorVersion>1</TensorFlowMajorVersion> | |||||
| <PackageAssetsPath>$(BinDir)packages\</PackageAssetsPath> | |||||
| <PackageId>$(MSBuildProjectName)</PackageId> | |||||
| <PackageVersion>$(TensorFlowVersion)</PackageVersion> | |||||
| <NoBuild>true</NoBuild> | |||||
| <IncludeBuildOutput>false</IncludeBuildOutput> | |||||
| <NuspecFile>Redist-Windows-GPU.nuspec</NuspecFile> | |||||
| <NuspecProperties>packageId=$(PackageId);version=$(PackageVersion)</NuspecProperties> | |||||
| <NuspecBasePath>$(ProjDir)</NuspecBasePath> | |||||
| <GenerateNuspecDependsOn>CopyFilesFromArchive</GenerateNuspecDependsOn> | |||||
| <PackageRid Condition="'$(OS)' == 'Windows_NT'">win</PackageRid> | |||||
| <PackageRid Condition="'$(OS)' != 'Windows_NT'">linux</PackageRid> | |||||
| <PackageRid Condition="$([MSBuild]::IsOSPlatform('osx'))">osx</PackageRid> | |||||
| <PackageRid>$(PackageRid)-$(TargetArchitecture)</PackageRid> | |||||
| </PropertyGroup> | |||||
| <PropertyGroup> | |||||
| <IncludeMLNetNotices>false</IncludeMLNetNotices> | |||||
| </PropertyGroup> | |||||
| <ItemGroup> | |||||
| <TensorFlowConfig Include="windows" | |||||
| FileExtension=".zip" | |||||
| FilesFromArchive="lib\tensorflow.dll; | |||||
| include\tensorflow\c\LICENSE" | |||||
| Runtime="win-x64"/> | |||||
| <TensorFlowConfig Condition="'$(OS)' != 'Windows_NT'" | |||||
| Include="linux" | |||||
| FileExtension=".tar.gz" | |||||
| FilesFromArchive="lib\libtensorflow.so; | |||||
| lib\libtensorflow_framework.so.$(TensorFlowMajorVersion); | |||||
| include\tensorflow\c\LICENSE" | |||||
| Runtime="linux-x64" /> | |||||
| <TensorFlowConfig Condition="'$(OS)' != 'Windows_NT'" | |||||
| Include="darwin" FileExtension=".tar.gz" | |||||
| FilesFromArchive="lib\libtensorflow.dylib; | |||||
| lib\libtensorflow_framework.$(TensorFlowMajorVersion).dylib; | |||||
| include\tensorflow\c\LICENSE" | |||||
| Runtime="osx-x64" /> | |||||
| <AdditionalDownloadFile Include="https://raw.githubusercontent.com/tensorflow/tensorflow/master/LICENSE" | |||||
| DownloadFile="$(BinDir)LICENSE" /> | |||||
| </ItemGroup> | |||||
| <Target Name="PrepareArchives"> | |||||
| <ItemGroup> | |||||
| <!-- although we could extract all archives on all machines, mac requires a fixup which can only be run on mac | |||||
| so we split these per-rid and join during the official build packaging. --> | |||||
| <TensorFlowArchive | |||||
| Include="@(TensorFlowConfig->'https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-%(Identity)-x86_64-$(TensorFlowVersion)%(FileExtension)')" /> | |||||
| <!-- set up metdata used by all targets --> | |||||
| <TensorFlowArchive DownloadFile="$(BinDir)%(FileName)%(Extension)" | |||||
| DownloadShaFile="$(BinDir)%(FileName)%(Extension).sha" | |||||
| ExtractDirectory="$(BinDir)%(FileName)" | |||||
| ExtractSemaphore="$(BinDir)%(FileName)\.extracted" | |||||
| LocalShaFile="$(MSBuildProjectDirectory)\%(FileName)%(Extension).sha"/> | |||||
| </ItemGroup> | |||||
| <Message Importance="high" Text="%(TensorFlowConfig.Runtime)"/> | |||||
| </Target> | |||||
| <Target Name="DownloadArchives" | |||||
| DependsOnTargets="PrepareArchives" | |||||
| Inputs="$(MSBuildProjectFile)" | |||||
| Outputs="@(TensorFlowArchive->'%(DownloadFile)');@(AdditionalDownloadFile->'%(DownloadFile)')"> | |||||
| <MakeDir Directories="$(BinDir)" /> | |||||
| <ItemGroup> | |||||
| <_downloadFiles Include="@(TensorFlowArchive);@(AdditionalDownloadFile)" Url="%(Identity)" DestinationFile="%(DownloadFile)" /> | |||||
| </ItemGroup> | |||||
| <Message Importance="High" Text="Downloading '%(_downloadFiles.Identity)' to '$(BinDir)'." /> | |||||
| <DownloadFile SourceUrl="%(_downloadFiles.Identity)" DestinationFolder="$(BinDir)"> | |||||
| <Output TaskParameter="DownloadedFile" ItemName="Content" /> | |||||
| </DownloadFile> | |||||
| </Target> | |||||
| <Target Name="ValidateAndExtractArchives" | |||||
| DependsOnTargets="DownloadArchives" | |||||
| Inputs="@(TensorFlowArchive->'%(DownloadFile)')" | |||||
| Outputs="@(TensorFlowArchive->'%(ExtractSemaphore)')"> | |||||
| <GetFileHash Files="@(TensorFlowArchive->'%(DownloadFile)')" Algorithm="SHA512"> | |||||
| <Output | |||||
| TaskParameter="Items" | |||||
| ItemName="FilesWithHashes" /> | |||||
| </GetFileHash> | |||||
| <WriteLinesToFile File="%(FilesWithHashes.Identity).sha" Lines="%(FilesWithHashes.FileHash)" Overwrite="true"/> | |||||
| <!-- If specified we'll update the checked in SHAs with the downloaded ones. --> | |||||
| <Copy Condition="'$(UpdateSHA)' == 'true'" | |||||
| SourceFiles="@(TensorFlowArchive->'%(DownloadShaFile)')" | |||||
| DestinationFiles="@(TensorFlowArchive->'%(LocalShaFile)')" /> | |||||
| <ItemGroup> | |||||
| <TensorFlowArchive> | |||||
| <DownloadSha>@(FilesWithHashes->'%(FileHash)')</DownloadSha> | |||||
| <LocalSha>$([System.IO.File]::ReadAllText('%(LocalShaFile)').Replace("%0A", "").Replace("%0D", ""))</LocalSha> | |||||
| </TensorFlowArchive> | |||||
| </ItemGroup> | |||||
| <Error Condition="!Exists('%(TensorFlowArchive.LocalShaFile)')" Text="SHA file '%(TensorFlowArchive.LocalShaFile)' does not exist. Build with /p:UpdateSHA=true to save it." /> | |||||
| <Message Importance="High" Text="@TensorFlowArchive->'%(TensorFlowArchive.DownloadFile) - %(TensorFlowArchive.LocalSha) - %(TensorFlowArchive.DownloadSha)"/> | |||||
| <!-- Validate that the downloaded SHAs match the expected checked in SHAs --> | |||||
| <Error Condition="'%(TensorFlowArchive.LocalSha)' != '%(TensorFlowArchive.DownloadSha)'" Text="Downloaded file '%(TensorFlowArchive.DownloadFile)' has unexpected SHA.%0A expected: %(TensorFlowArchive.LocalSha)%0A --actual: %(TensorFlowArchive.DownloadSha)%0ABuild with /p:UpdateSHA=true if you intentionally changed the URL and wish to update the SHAs, otherwise this could indicate an incomplete download or intercerpted URL and should be examined." /> | |||||
| <!-- The archives are valid, lets extract them, ensuring an empty directory --> | |||||
| <RemoveDir Directories="@(TensorFlowArchive->'%(ExtractDirectory)')" /> | |||||
| <MakeDir Directories="@(TensorFlowArchive->'%(ExtractDirectory)')" /> | |||||
| <Message Importance="High" Text="Decompressing '%(TensorFlowArchive.DownloadFile)' to '%(TensorFlowArchive.ExtractDirectory)'." /> | |||||
| <Unzip Condition="'%(TensorFlowArchive.FileExtension)' == '.zip'" | |||||
| SourceFiles="%(TensorFlowArchive.DownloadFile)" | |||||
| DestinationFolder="%(TensorFlowArchive.ExtractDirectory)" /> | |||||
| <Exec Condition="'$(OS)' != 'Windows_NT' AND '%(TensorFlowArchive.FileExtension)' == '.tar.gz'" | |||||
| WorkingDirectory="$(MSBuildThisFileDirectory)" | |||||
| Command="tar -xzm --hard-dereference -f %(TensorFlowArchive.DownloadFile) -C %(TensorFlowArchive.ExtractDirectory)" /> | |||||
| <Exec Condition="'$(OS)' != 'Windows_NT'" | |||||
| Command="chmod -R +w %(TensorFlowArchive.ExtractDirectory)" /> | |||||
| <Touch Files="@(TensorFlowArchive->'%(ExtractSemaphore)')" AlwaysCreate="true" /> | |||||
| </Target> | |||||
| <!-- Select the files we want to copy out of each archive. --> | |||||
| <Target Name="GetFilesFromArchive" | |||||
| DependsOnTargets="ValidateAndExtractArchives" > | |||||
| <ItemGroup> | |||||
| <!-- batch rather than transform so that we can split FilesFromArchive metadata --> | |||||
| <_fileFromArchive Include="%(TensorFlowArchive.FilesFromArchive)" ExtractDirectory="%(TensorFlowArchive.ExtractDirectory)" Runtime="%(TensorFlowArchive.Runtime)" /> | |||||
| <_fileFromArchive DestinationFile="%(FileName)%(Extension)"/> | |||||
| <_fileFromArchive PackagePath="runtimes\%(_fileFromArchive.Runtime)\native\%(_fileFromArchive.DestinationFile)" /> | |||||
| <!-- LICENSE from the package is actually THIRD_PARTY_NOTICES--> | |||||
| <_fileFromArchive Condition="'%(DestinationFile)' == 'LICENSE'" PackagePath="THIRD_PARTY_NOTICES.txt" Runtime="" /> | |||||
| <!-- copy to packaging location --> | |||||
| <FilesFromArchive Include="@(_fileFromArchive->'%(ExtractDirectory)\%(Identity)')" | |||||
| TargetPath="$(PackageAssetsPath)$(MSBuildProjectName)\%(PackagePath)" /> | |||||
| <!-- include LICENSE that was downloaded from GitHub --> | |||||
| <FilesFromArchive Include="$(BinDir)\LICENSE" | |||||
| TargetPath="$(PackageAssetsPath)$(MSBuildProjectName)\LICENSE.txt" /> | |||||
| <!-- copy to NativeAssets location, only for current RID, so that they may be used by tests --> | |||||
| <!--<FilesFromArchive Condition="'$(PackageRID)' == '%(_fileFromArchive.Runtime)'" | |||||
| Include="@(_fileFromArchive->'%(ExtractDirectory)\%(Identity)')" | |||||
| TargetPath="$(NativeAssetsBuiltPath)\%(_fileFromArchive.DestinationFile)" />--> | |||||
| </ItemGroup> | |||||
| </Target> | |||||
| <Target Name="CopyFilesFromArchive" | |||||
| DependsOnTargets="GetFilesFromArchive"> | |||||
| <Message Importance="High" Text="@(FilesFromArchive) -> %(FilesFromArchive.TargetPath)" /> | |||||
| <Copy SourceFiles="@(FilesFromArchive)" | |||||
| DestinationFiles="@(FilesFromArchive->'%(TargetPath)')" /> | |||||
| </Target> | |||||
| <Target Name="Clean"> | |||||
| <Message Importance="High" Text="Deleting $(BinDir);$(ObjDir)" /> | |||||
| <RemoveDir Directories="$(BinDir);$(ObjDir)" /> | |||||
| </Target> | |||||
| </Project> | |||||
| @@ -0,0 +1 @@ | |||||
| 850A27858FA951DF77A78CD1BD78B54F6EE2532DD5A49F0579A7B02C795C62F0212F20177EAEA2BD77BD451A57FBBD1348362492F9E14BFE5CA5028C71711293 | |||||
| @@ -0,0 +1,24 @@ | |||||
| using System; | |||||
| namespace TensorFlowDatasets | |||||
| { | |||||
| /// <summary> | |||||
| /// Abstract base class for all datasets. | |||||
| /// </summary> | |||||
| public class DatasetBuilder | |||||
| { | |||||
| /// <summary> | |||||
| /// Downloads and prepares dataset for reading. | |||||
| /// </summary> | |||||
| /// <param name="download_dir"> | |||||
| /// directory where downloaded files are stored. | |||||
| /// </param> | |||||
| /// <param name="download_config"> | |||||
| /// further configuration for downloading and preparing dataset. | |||||
| /// </param> | |||||
| public void download_and_prepare(string download_dir = null, DownloadConfig download_config = null) | |||||
| { | |||||
| } | |||||
| } | |||||
| } | |||||
| @@ -0,0 +1,10 @@ | |||||
| using System; | |||||
| using System.Collections.Generic; | |||||
| using System.Text; | |||||
| namespace TensorFlowDatasets | |||||
| { | |||||
| public class DownloadConfig | |||||
| { | |||||
| } | |||||
| } | |||||
| @@ -0,0 +1,19 @@ | |||||
| <Project Sdk="Microsoft.NET.Sdk"> | |||||
| <PropertyGroup> | |||||
| <TargetFramework>netcoreapp2.2</TargetFramework> | |||||
| <PackageId>SciSharp.TensorFlowDatasets</PackageId> | |||||
| <Version>0.0.1</Version> | |||||
| <Authors>SciSharp Team</Authors> | |||||
| <Product>TensorFlow Datasets</Product> | |||||
| <GeneratePackageOnBuild>true</GeneratePackageOnBuild> | |||||
| <PackageIconUrl>https://avatars3.githubusercontent.com/u/44989469?s=200&v=4</PackageIconUrl> | |||||
| <PackageProjectUrl>http://scisharpstack.org</PackageProjectUrl> | |||||
| <Description>TensorFlow Datasets provides many public datasets as tf.data.Datasets.</Description> | |||||
| <RepositoryUrl>https://github.com/SciSharp/TensorFlow.NET</RepositoryUrl> | |||||
| <RepositoryType>git</RepositoryType> | |||||
| <PackageTags>SciSharp, Dataset, TensorFlow</PackageTags> | |||||
| <Copyright>Apache 2.0</Copyright> | |||||
| </PropertyGroup> | |||||
| </Project> | |||||
| @@ -27,5 +27,54 @@ namespace Tensorflow.Hub | |||||
| labels.astype(dataType); | labels.astype(dataType); | ||||
| Labels = labels; | Labels = labels; | ||||
| } | } | ||||
| public (NDArray, NDArray) GetNextBatch(int batch_size, bool fake_data = false, bool shuffle = true) | |||||
| { | |||||
| var start = IndexInEpoch; | |||||
| // Shuffle for the first epoch | |||||
| if(EpochsCompleted == 0 && start == 0 && shuffle) | |||||
| { | |||||
| var perm0 = np.arange(NumOfExamples); | |||||
| np.random.shuffle(perm0); | |||||
| Data = Data[perm0]; | |||||
| Labels = Labels[perm0]; | |||||
| } | |||||
| // Go to the next epoch | |||||
| if (start + batch_size > NumOfExamples) | |||||
| { | |||||
| // Finished epoch | |||||
| EpochsCompleted += 1; | |||||
| // Get the rest examples in this epoch | |||||
| var rest_num_examples = NumOfExamples - start; | |||||
| //var images_rest_part = _images[np.arange(start, _num_examples)]; | |||||
| //var labels_rest_part = _labels[np.arange(start, _num_examples)]; | |||||
| // Shuffle the data | |||||
| if (shuffle) | |||||
| { | |||||
| var perm = np.arange(NumOfExamples); | |||||
| np.random.shuffle(perm); | |||||
| Data = Data[perm]; | |||||
| Labels = Labels[perm]; | |||||
| } | |||||
| start = 0; | |||||
| IndexInEpoch = batch_size - rest_num_examples; | |||||
| var end = IndexInEpoch; | |||||
| var images_new_part = Data[np.arange(start, end)]; | |||||
| var labels_new_part = Labels[np.arange(start, end)]; | |||||
| /*return (np.concatenate(new float[][] { images_rest_part.Data<float>(), images_new_part.Data<float>() }, axis: 0), | |||||
| np.concatenate(new float[][] { labels_rest_part.Data<float>(), labels_new_part.Data<float>() }, axis: 0));*/ | |||||
| return (images_new_part, labels_new_part); | |||||
| } | |||||
| else | |||||
| { | |||||
| IndexInEpoch += batch_size; | |||||
| var end = IndexInEpoch; | |||||
| return (Data[np.arange(start, end)], Labels[np.arange(start, end)]); | |||||
| } | |||||
| } | |||||
| } | } | ||||
| } | } | ||||
| @@ -15,14 +15,26 @@ namespace Tensorflow.Hub | |||||
| private const string TEST_IMAGES = "t10k-images-idx3-ubyte.gz"; | private const string TEST_IMAGES = "t10k-images-idx3-ubyte.gz"; | ||||
| private const string TEST_LABELS = "t10k-labels-idx1-ubyte.gz"; | private const string TEST_LABELS = "t10k-labels-idx1-ubyte.gz"; | ||||
| public static async Task<Datasets<MnistDataSet>> LoadAsync(string trainDir, bool oneHot = false) | |||||
| public static async Task<Datasets<MnistDataSet>> LoadAsync(string trainDir, bool oneHot = false, int? trainSize = null, int? validationSize = null, int? testSize = null) | |||||
| { | { | ||||
| var loader = new MnistModelLoader(); | var loader = new MnistModelLoader(); | ||||
| return await loader.LoadAsync(new ModelLoadSetting | |||||
| var setting = new ModelLoadSetting | |||||
| { | { | ||||
| TrainDir = trainDir, | TrainDir = trainDir, | ||||
| OneHot = oneHot | OneHot = oneHot | ||||
| }); | |||||
| }; | |||||
| if (trainSize.HasValue) | |||||
| setting.TrainSize = trainSize.Value; | |||||
| if (validationSize.HasValue) | |||||
| setting.ValidationSize = validationSize.Value; | |||||
| if (testSize.HasValue) | |||||
| setting.TestSize = testSize.Value; | |||||
| return await loader.LoadAsync(setting); | |||||
| } | } | ||||
| public async Task<Datasets<MnistDataSet>> LoadAsync(ModelLoadSetting setting) | public async Task<Datasets<MnistDataSet>> LoadAsync(ModelLoadSetting setting) | ||||
| @@ -86,7 +98,7 @@ namespace Tensorflow.Hub | |||||
| var train = new MnistDataSet(trainImages, trainLabels, dtype, reshape); | var train = new MnistDataSet(trainImages, trainLabels, dtype, reshape); | ||||
| var validation = new MnistDataSet(validationImages, validationLabels, dtype, reshape); | var validation = new MnistDataSet(validationImages, validationLabels, dtype, reshape); | ||||
| var test = new MnistDataSet(trainImages, trainLabels, dtype, reshape); | |||||
| var test = new MnistDataSet(testImages, testLabels, dtype, reshape); | |||||
| return new Datasets<MnistDataSet>(train, validation, test); | return new Datasets<MnistDataSet>(train, validation, test); | ||||
| } | } | ||||
| @@ -30,8 +30,8 @@ namespace Tensorflow | |||||
| get | get | ||||
| { | { | ||||
| var data = new byte[buffer.length]; | var data = new byte[buffer.length]; | ||||
| if (buffer.length > 0) | |||||
| Marshal.Copy(buffer.data, data, 0, (int)buffer.length); | |||||
| if (data.Length > 0) | |||||
| Marshal.Copy(buffer.data, data, 0, data.Length); | |||||
| return data; | return data; | ||||
| } | } | ||||
| } | } | ||||
| @@ -128,7 +128,7 @@ namespace Tensorflow | |||||
| IntPtr c_op; | IntPtr c_op; | ||||
| while ((c_op = c_api.TF_GraphNextOperation(graph, ref pos)) != IntPtr.Zero) | while ((c_op = c_api.TF_GraphNextOperation(graph, ref pos)) != IntPtr.Zero) | ||||
| { | { | ||||
| yield return c_op; | |||||
| yield return new Operation(c_op, graph); | |||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| @@ -31,8 +31,9 @@ namespace Tensorflow | |||||
| private GraphDef _as_graph_def(bool add_shapes = false) | private GraphDef _as_graph_def(bool add_shapes = false) | ||||
| { | { | ||||
| var buffer = ToGraphDef(Status); | |||||
| Status.Check(); | |||||
| var status = new Status(); | |||||
| var buffer = ToGraphDef(status); | |||||
| status.Check(); | |||||
| var def = GraphDef.Parser.ParseFrom(buffer); | var def = GraphDef.Parser.ParseFrom(buffer); | ||||
| buffer.Dispose(); | buffer.Dispose(); | ||||
| @@ -43,16 +43,20 @@ namespace Tensorflow | |||||
| var bytes = File.ReadAllBytes(file_path); | var bytes = File.ReadAllBytes(file_path); | ||||
| var graph_def = new Tensorflow.Buffer(bytes); | var graph_def = new Tensorflow.Buffer(bytes); | ||||
| var opts = c_api.TF_NewImportGraphDefOptions(); | var opts = c_api.TF_NewImportGraphDefOptions(); | ||||
| c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, Status); | |||||
| return Status; | |||||
| var status = new Status(); | |||||
| c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, status); | |||||
| return status; | |||||
| } | } | ||||
| public Status Import(byte[] bytes) | |||||
| public Status Import(byte[] bytes, string prefix = "") | |||||
| { | { | ||||
| var graph_def = new Tensorflow.Buffer(bytes); | var graph_def = new Tensorflow.Buffer(bytes); | ||||
| var opts = c_api.TF_NewImportGraphDefOptions(); | var opts = c_api.TF_NewImportGraphDefOptions(); | ||||
| c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, Status); | |||||
| return Status; | |||||
| c_api.TF_ImportGraphDefOptionsSetPrefix(opts, prefix); | |||||
| var status = new Status(); | |||||
| c_api.TF_GraphImportGraphDef(_handle, graph_def, opts, status); | |||||
| c_api.TF_DeleteImportGraphDefOptions(opts); | |||||
| return status; | |||||
| } | } | ||||
| public static Graph ImportFromPB(string file_path, string name = null) | public static Graph ImportFromPB(string file_path, string name = null) | ||||
| @@ -38,6 +38,31 @@ namespace Tensorflow | |||||
| return c_api.TF_NewOperation(_handle, opType, opName); | return c_api.TF_NewOperation(_handle, opType, opName); | ||||
| } | } | ||||
| public unsafe Operation[] ReturnOperations(IntPtr results) | |||||
| { | |||||
| TF_Operation return_oper_handle = new TF_Operation(); | |||||
| int num_return_opers = 0; | |||||
| c_api.TF_ImportGraphDefResultsReturnOperations(results, ref num_return_opers, ref return_oper_handle); | |||||
| Operation[] return_opers = new Operation[num_return_opers]; | |||||
| for (int i = 0; i < num_return_opers; i++) | |||||
| { | |||||
| var handle = return_oper_handle.node + Marshal.SizeOf<TF_Operation>() * i; | |||||
| return_opers[i] = new Operation(*(IntPtr*)handle); | |||||
| } | |||||
| return return_opers; | |||||
| } | |||||
| public Operation OperationByName(string operName) | |||||
| { | |||||
| return c_api.TF_GraphOperationByName(_handle, operName); | |||||
| } | |||||
| public ITensorOrOperation[] get_operations() | |||||
| { | |||||
| return _nodes_by_name.Values.Select(x => x).ToArray(); | |||||
| } | |||||
| /// <summary> | /// <summary> | ||||
| /// Returns the `Operation` with the given `name`. | /// Returns the `Operation` with the given `name`. | ||||
| /// | /// | ||||
| @@ -15,6 +15,7 @@ | |||||
| ******************************************************************************/ | ******************************************************************************/ | ||||
| using System; | using System; | ||||
| using System.Collections; | |||||
| using System.Collections.Generic; | using System.Collections.Generic; | ||||
| using System.Linq; | using System.Linq; | ||||
| using System.Runtime.InteropServices; | using System.Runtime.InteropServices; | ||||
| @@ -72,7 +73,7 @@ namespace Tensorflow | |||||
| all variables that are created during the construction of a graph. The caller | all variables that are created during the construction of a graph. The caller | ||||
| may define additional collections by specifying a new name. | may define additional collections by specifying a new name. | ||||
| */ | */ | ||||
| public partial class Graph : IPython, IDisposable | |||||
| public partial class Graph : IPython, IDisposable, IEnumerable<Operation> | |||||
| { | { | ||||
| private IntPtr _handle; | private IntPtr _handle; | ||||
| private Dictionary<int, ITensorOrOperation> _nodes_by_id; | private Dictionary<int, ITensorOrOperation> _nodes_by_id; | ||||
| @@ -87,8 +88,7 @@ namespace Tensorflow | |||||
| private string _graph_key; | private string _graph_key; | ||||
| public string graph_key => _graph_key; | public string graph_key => _graph_key; | ||||
| public string _last_loss_reduction; | public string _last_loss_reduction; | ||||
| public bool _is_loss_scaled_by_optimizer { get; set; } | |||||
| public Status Status { get; } | |||||
| public bool _is_loss_scaled_by_optimizer { get; set; } | |||||
| /// <summary> | /// <summary> | ||||
| /// True if the graph is considered "finalized". In that case no | /// True if the graph is considered "finalized". In that case no | ||||
| @@ -106,7 +106,6 @@ namespace Tensorflow | |||||
| public Graph() | public Graph() | ||||
| { | { | ||||
| _handle = c_api.TF_NewGraph(); | _handle = c_api.TF_NewGraph(); | ||||
| Status = new Status(); | |||||
| _nodes_by_id = new Dictionary<int, ITensorOrOperation>(); | _nodes_by_id = new Dictionary<int, ITensorOrOperation>(); | ||||
| _nodes_by_name = new Dictionary<string, ITensorOrOperation>(); | _nodes_by_name = new Dictionary<string, ITensorOrOperation>(); | ||||
| _names_in_use = new Dictionary<string, int>(); | _names_in_use = new Dictionary<string, int>(); | ||||
| @@ -116,11 +115,14 @@ namespace Tensorflow | |||||
| public Graph(IntPtr handle) | public Graph(IntPtr handle) | ||||
| { | { | ||||
| _handle = handle; | _handle = handle; | ||||
| Status = new Status(); | |||||
| _nodes_by_id = new Dictionary<int, ITensorOrOperation>(); | _nodes_by_id = new Dictionary<int, ITensorOrOperation>(); | ||||
| _nodes_by_name = new Dictionary<string, ITensorOrOperation>(); | _nodes_by_name = new Dictionary<string, ITensorOrOperation>(); | ||||
| _names_in_use = new Dictionary<string, int>(); | _names_in_use = new Dictionary<string, int>(); | ||||
| _graph_key = $"grap-key-{ops.uid()}/"; | _graph_key = $"grap-key-{ops.uid()}/"; | ||||
| } | |||||
| public void __enter__() | |||||
| { | |||||
| } | } | ||||
| public ITensorOrOperation as_graph_element(object obj, bool allow_tensor = true, bool allow_operation = true) | public ITensorOrOperation as_graph_element(object obj, bool allow_tensor = true, bool allow_operation = true) | ||||
| @@ -409,31 +411,6 @@ namespace Tensorflow | |||||
| return return_outputs; | return return_outputs; | ||||
| } | } | ||||
| public unsafe Operation[] ReturnOperations(IntPtr results) | |||||
| { | |||||
| TF_Operation return_oper_handle = new TF_Operation(); | |||||
| int num_return_opers = 0; | |||||
| c_api.TF_ImportGraphDefResultsReturnOperations(results, ref num_return_opers, ref return_oper_handle); | |||||
| Operation[] return_opers = new Operation[num_return_opers]; | |||||
| for (int i = 0; i < num_return_opers; i++) | |||||
| { | |||||
| var handle = return_oper_handle.node + Marshal.SizeOf<TF_Operation>() * i; | |||||
| return_opers[i] = new Operation(*(IntPtr*)handle); | |||||
| } | |||||
| return return_opers; | |||||
| } | |||||
| public Operation OperationByName(string operName) | |||||
| { | |||||
| return c_api.TF_GraphOperationByName(_handle, operName); | |||||
| } | |||||
| public ITensorOrOperation[] get_operations() | |||||
| { | |||||
| return _nodes_by_name.Values.Select(x => x).ToArray(); | |||||
| } | |||||
| public string[] get_all_collection_keys() | public string[] get_all_collection_keys() | ||||
| { | { | ||||
| return _collections.Keys.Where(x => !x.StartsWith("__")).ToArray(); | return _collections.Keys.Where(x => !x.StartsWith("__")).ToArray(); | ||||
| @@ -468,7 +445,12 @@ namespace Tensorflow | |||||
| public void Dispose() | public void Dispose() | ||||
| { | { | ||||
| // c_api.TF_DeleteGraph(_handle); | |||||
| /*if (_handle != IntPtr.Zero) | |||||
| c_api.TF_DeleteGraph(_handle); | |||||
| _handle = IntPtr.Zero; | |||||
| GC.SuppressFinalize(this);*/ | |||||
| } | } | ||||
| /// <summary> | /// <summary> | ||||
| @@ -481,17 +463,46 @@ namespace Tensorflow | |||||
| public Tensor get_tensor_by_name(string name) | public Tensor get_tensor_by_name(string name) | ||||
| { | { | ||||
| return (Tensor)this.as_graph_element(name, allow_tensor: true, allow_operation: false); | return (Tensor)this.as_graph_element(name, allow_tensor: true, allow_operation: false); | ||||
| } | |||||
| public void __enter__() | |||||
| { | |||||
| } | |||||
| public TensorShape GetTensorShape(TF_Output output) | |||||
| { | |||||
| var status = new Status(); | |||||
| var ndim = c_api.TF_GraphGetTensorNumDims(_handle, output, status); | |||||
| status.Check(); | |||||
| if (ndim == -1) | |||||
| return new TensorShape(); | |||||
| var dims = new long[ndim]; | |||||
| c_api.TF_GraphGetTensorShape(_handle, output, dims, dims.Length, status); | |||||
| status.Check(); | |||||
| return new TensorShape(dims.Select(x => (int)x).ToArray()); | |||||
| } | |||||
| public override string ToString() | |||||
| { | |||||
| int len = 0; | |||||
| return c_api.TF_GraphDebugString(_handle, out len); | |||||
| } | } | ||||
| public void __exit__() | public void __exit__() | ||||
| { | { | ||||
| } | |||||
| } | |||||
| private IEnumerable<Operation> GetEnumerable() | |||||
| => c_api_util.tf_operations(this); | |||||
| IEnumerator<Operation> IEnumerable<Operation>.GetEnumerator() | |||||
| => GetEnumerable().GetEnumerator(); | |||||
| IEnumerator IEnumerable.GetEnumerator() | |||||
| { | |||||
| throw new NotImplementedException(); | |||||
| } | |||||
| public static implicit operator IntPtr(Graph graph) | public static implicit operator IntPtr(Graph graph) | ||||
| { | { | ||||
| return graph._handle; | return graph._handle; | ||||
| @@ -43,6 +43,9 @@ namespace Tensorflow | |||||
| [DllImport(TensorFlowLibName)] | [DllImport(TensorFlowLibName)] | ||||
| public static extern void TF_DeleteImportGraphDefResults(IntPtr results); | public static extern void TF_DeleteImportGraphDefResults(IntPtr results); | ||||
| [DllImport(TensorFlowLibName)] | |||||
| public static extern string TF_GraphDebugString(IntPtr graph, out int len); | |||||
| [DllImport(TensorFlowLibName)] | [DllImport(TensorFlowLibName)] | ||||
| public static extern void TF_GraphGetOpDef(IntPtr graph, string op_name, IntPtr output_op_def, IntPtr status); | public static extern void TF_GraphGetOpDef(IntPtr graph, string op_name, IntPtr output_op_def, IntPtr status); | ||||
| @@ -100,6 +103,7 @@ namespace Tensorflow | |||||
| /// <param name="status">TF_Status*</param> | /// <param name="status">TF_Status*</param> | ||||
| [DllImport(TensorFlowLibName)] | [DllImport(TensorFlowLibName)] | ||||
| public static extern void TF_GraphImportGraphDef(IntPtr graph, IntPtr graph_def, IntPtr options, IntPtr status); | public static extern void TF_GraphImportGraphDef(IntPtr graph, IntPtr graph_def, IntPtr options, IntPtr status); | ||||
| /// <summary> | /// <summary> | ||||
| /// Iterate through the operations of a graph. | /// Iterate through the operations of a graph. | ||||
| /// </summary> | /// </summary> | ||||
| @@ -23,7 +23,10 @@ namespace Tensorflow | |||||
| /// </summary> | /// </summary> | ||||
| public partial class Operation | public partial class Operation | ||||
| { | { | ||||
| public static implicit operator Operation(IntPtr handle) => new Operation(handle); | |||||
| // make sure the new op is in the same graph instance | |||||
| public static implicit operator Operation(IntPtr handle) | |||||
| => new Operation(handle); | |||||
| public static implicit operator IntPtr(Operation op) => op._handle; | public static implicit operator IntPtr(Operation op) => op._handle; | ||||
| public static implicit operator Tensor(Operation op) => op.output; | public static implicit operator Tensor(Operation op) => op.output; | ||||
| @@ -35,6 +35,8 @@ namespace Tensorflow | |||||
| public int OutputNumConsumers(int index) => c_api.TF_OperationOutputNumConsumers(new TF_Output(_handle, index)); | public int OutputNumConsumers(int index) => c_api.TF_OperationOutputNumConsumers(new TF_Output(_handle, index)); | ||||
| public TF_Output this[int index] => _tf_output(index); | |||||
| public unsafe TF_Input[] OutputConsumers(int index, int max_consumers) | public unsafe TF_Input[] OutputConsumers(int index, int max_consumers) | ||||
| { | { | ||||
| int size = Marshal.SizeOf<TF_Input>(); | int size = Marshal.SizeOf<TF_Input>(); | ||||
| @@ -32,20 +32,19 @@ namespace Tensorflow | |||||
| protected int _current_version; | protected int _current_version; | ||||
| protected byte[] _target; | protected byte[] _target; | ||||
| protected IntPtr _session; | protected IntPtr _session; | ||||
| public Status Status; | |||||
| public Graph graph => _graph; | public Graph graph => _graph; | ||||
| public BaseSession(string target = "", Graph g = null, SessionOptions opts = null) | public BaseSession(string target = "", Graph g = null, SessionOptions opts = null) | ||||
| { | { | ||||
| _graph = g is null ? ops.get_default_graph() : g; | _graph = g is null ? ops.get_default_graph() : g; | ||||
| _graph.as_default(); | |||||
| _target = UTF8Encoding.UTF8.GetBytes(target); | _target = UTF8Encoding.UTF8.GetBytes(target); | ||||
| SessionOptions newOpts = null; | SessionOptions newOpts = null; | ||||
| if (opts == null) | if (opts == null) | ||||
| newOpts = c_api.TF_NewSessionOptions(); | newOpts = c_api.TF_NewSessionOptions(); | ||||
| Status = new Status(); | |||||
| var Status = new Status(); | |||||
| _session = c_api.TF_NewSession(_graph, opts ?? newOpts, Status); | _session = c_api.TF_NewSession(_graph, opts ?? newOpts, Status); | ||||
| @@ -15,6 +15,7 @@ | |||||
| ******************************************************************************/ | ******************************************************************************/ | ||||
| using System; | using System; | ||||
| using System.Runtime.InteropServices; | |||||
| namespace Tensorflow | namespace Tensorflow | ||||
| { | { | ||||
| @@ -26,8 +27,8 @@ namespace Tensorflow | |||||
| } | } | ||||
| public Session(IntPtr handle) | |||||
| : base("", null, null) | |||||
| public Session(IntPtr handle, Graph g = null) | |||||
| : base("", g, null) | |||||
| { | { | ||||
| _session = handle; | _session = handle; | ||||
| } | } | ||||
| @@ -36,7 +37,7 @@ namespace Tensorflow | |||||
| : base("", g, opts) | : base("", g, opts) | ||||
| { | { | ||||
| if (s == null) | if (s == null) | ||||
| s = Status; | |||||
| s = new Status(); | |||||
| } | } | ||||
| public Session as_default() | public Session as_default() | ||||
| @@ -51,17 +52,25 @@ namespace Tensorflow | |||||
| var status = new Status(); | var status = new Status(); | ||||
| var opt = c_api.TF_NewSessionOptions(); | var opt = c_api.TF_NewSessionOptions(); | ||||
| var tags = new string[] { "serve" }; | |||||
| var buffer = new TF_Buffer(); | var buffer = new TF_Buffer(); | ||||
| var sess = c_api.TF_LoadSessionFromSavedModel(opt, IntPtr.Zero, path, new string[0], 0, graph, ref buffer, status); | |||||
| //var bytes = new Buffer(buffer.data).Data; | |||||
| //var meta_graph = MetaGraphDef.Parser.ParseFrom(bytes); | |||||
| var sess = c_api.TF_LoadSessionFromSavedModel(opt, | |||||
| IntPtr.Zero, | |||||
| path, | |||||
| tags, | |||||
| tags.Length, | |||||
| graph, | |||||
| ref buffer, | |||||
| status); | |||||
| // load graph bytes | |||||
| // var data = new byte[buffer.length]; | |||||
| // Marshal.Copy(buffer.data, data, 0, (int)buffer.length); | |||||
| // var meta_graph = MetaGraphDef.Parser.ParseFrom(data);*/ | |||||
| status.Check(); | status.Check(); | ||||
| new Graph(graph).as_default(); | |||||
| return sess; | |||||
| return new Session(sess, g: new Graph(graph).as_default()); | |||||
| } | } | ||||
| public static implicit operator IntPtr(Session session) => session._session; | public static implicit operator IntPtr(Session session) => session._session; | ||||
| @@ -74,8 +83,25 @@ namespace Tensorflow | |||||
| public void Dispose() | public void Dispose() | ||||
| { | { | ||||
| c_api.TF_DeleteSession(_session, Status); | |||||
| Status.Dispose(); | |||||
| IntPtr h = IntPtr.Zero; | |||||
| lock (this) | |||||
| { | |||||
| h = _session; | |||||
| _session = IntPtr.Zero; | |||||
| } | |||||
| if (h != IntPtr.Zero) | |||||
| { | |||||
| var status = new Status(); | |||||
| c_api.TF_DeleteSession(h, status); | |||||
| status.Check(true); | |||||
| } | |||||
| GC.SuppressFinalize(this); | |||||
| } | |||||
| ~Session() | |||||
| { | |||||
| Dispose(); | |||||
| } | } | ||||
| public void __enter__() | public void __enter__() | ||||
| @@ -5,7 +5,7 @@ | |||||
| <AssemblyName>TensorFlow.NET</AssemblyName> | <AssemblyName>TensorFlow.NET</AssemblyName> | ||||
| <RootNamespace>Tensorflow</RootNamespace> | <RootNamespace>Tensorflow</RootNamespace> | ||||
| <TargetTensorFlow>1.14.0</TargetTensorFlow> | <TargetTensorFlow>1.14.0</TargetTensorFlow> | ||||
| <Version>0.10.3</Version> | |||||
| <Version>0.10.7.2</Version> | |||||
| <Authors>Haiping Chen, Meinrad Recheis</Authors> | <Authors>Haiping Chen, Meinrad Recheis</Authors> | ||||
| <Company>SciSharp STACK</Company> | <Company>SciSharp STACK</Company> | ||||
| <GeneratePackageOnBuild>true</GeneratePackageOnBuild> | <GeneratePackageOnBuild>true</GeneratePackageOnBuild> | ||||
| @@ -17,7 +17,7 @@ | |||||
| <PackageTags>TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C#</PackageTags> | <PackageTags>TensorFlow, NumSharp, SciSharp, MachineLearning, TensorFlow.NET, C#</PackageTags> | ||||
| <Description>Google's TensorFlow full binding in .NET Standard. | <Description>Google's TensorFlow full binding in .NET Standard. | ||||
| Docs: https://tensorflownet.readthedocs.io</Description> | Docs: https://tensorflownet.readthedocs.io</Description> | ||||
| <AssemblyVersion>0.10.3.0</AssemblyVersion> | |||||
| <AssemblyVersion>0.10.7.2</AssemblyVersion> | |||||
| <PackageReleaseNotes>Changes since v0.9.0: | <PackageReleaseNotes>Changes since v0.9.0: | ||||
| 1. Added full connected Convolution Neural Network example. | 1. Added full connected Convolution Neural Network example. | ||||
| @@ -29,9 +29,14 @@ Docs: https://tensorflownet.readthedocs.io</Description> | |||||
| 7. Add BatchMatMulGrad. | 7. Add BatchMatMulGrad. | ||||
| 8. Upgrade NumSharp. | 8. Upgrade NumSharp. | ||||
| 9. Fix strided_slice_grad type convention error. | 9. Fix strided_slice_grad type convention error. | ||||
| 10. Add AbsGrad.</PackageReleaseNotes> | |||||
| 10. Add AbsGrad. | |||||
| 11. Fix Session.LoadFromSavedModel(string). | |||||
| 12. Add Tensor operator overloads. | |||||
| 13. Fix default graph and operation issue when import model. | |||||
| 14. Fix TF_String endcode and decode. | |||||
| 15. Fix Tensor memory leak.</PackageReleaseNotes> | |||||
| <LangVersion>7.2</LangVersion> | <LangVersion>7.2</LangVersion> | ||||
| <FileVersion>0.10.3.0</FileVersion> | |||||
| <FileVersion>0.10.7.2</FileVersion> | |||||
| <PackageLicenseFile>LICENSE</PackageLicenseFile> | <PackageLicenseFile>LICENSE</PackageLicenseFile> | ||||
| <PackageRequireLicenseAcceptance>true</PackageRequireLicenseAcceptance> | <PackageRequireLicenseAcceptance>true</PackageRequireLicenseAcceptance> | ||||
| <SignAssembly>true</SignAssembly> | <SignAssembly>true</SignAssembly> | ||||
| @@ -15,6 +15,7 @@ | |||||
| ******************************************************************************/ | ******************************************************************************/ | ||||
| using System; | using System; | ||||
| using System.Linq; | |||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace Tensorflow | namespace Tensorflow | ||||
| @@ -63,22 +64,56 @@ namespace Tensorflow | |||||
| public static Tensor operator *(long constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor); | public static Tensor operator *(long constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor); | ||||
| public static Tensor operator *(ulong constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor); | public static Tensor operator *(ulong constant, Tensor tensor) => BinaryOpWrapper("mul", constant, tensor); | ||||
| public static Tensor operator /(Tensor x, Tensor y) => BinaryOpWrapper("truediv", x, y); | |||||
| public static Tensor operator /(Tensor x, float y) => BinaryOpWrapper("truediv", x, y); | |||||
| private static readonly TF_DataType[] _intTfDataTypes = { | |||||
| TF_DataType.TF_INT8, TF_DataType.TF_INT16, TF_DataType.TF_INT32, TF_DataType.TF_INT64, | |||||
| TF_DataType.TF_QINT8, TF_DataType.TF_QINT16, TF_DataType.TF_QINT32, | |||||
| TF_DataType.TF_UINT8, TF_DataType.TF_UINT16, TF_DataType.TF_UINT32, TF_DataType.TF_UINT64 | |||||
| }; | |||||
| public static Tensor operator /(double x, Tensor y) => BinaryOpWrapper("truediv", x, y); | |||||
| public static Tensor operator /(float x, Tensor y) => BinaryOpWrapper("truediv", x, y); | public static Tensor operator /(float x, Tensor y) => BinaryOpWrapper("truediv", x, y); | ||||
| public static Tensor operator /(int x, Tensor y) => BinaryOpWrapper("floordiv", x, y); | |||||
| public static Tensor operator /(Tensor x, Tensor y) => | |||||
| _intTfDataTypes.Contains(x._dtype) | |||||
| ? BinaryOpWrapper("floordiv", x, y) | |||||
| : BinaryOpWrapper("truediv", x, y); | |||||
| public static Tensor operator /(Tensor x, int y) => BinaryOpWrapper("floordiv", x, y); | |||||
| public static Tensor operator /(Tensor x, float y) => BinaryOpWrapper("truediv", x, y); | |||||
| public static Tensor operator /(Tensor x, double y) => BinaryOpWrapper("truediv", x, y); | public static Tensor operator /(Tensor x, double y) => BinaryOpWrapper("truediv", x, y); | ||||
| public static Tensor operator %(Tensor x, Tensor y) => BinaryOpWrapper("mod", x, y); | public static Tensor operator %(Tensor x, Tensor y) => BinaryOpWrapper("mod", x, y); | ||||
| public static Tensor operator >(double x, Tensor y) => gen_math_ops.greater(x, y); | |||||
| public static Tensor operator >(float x, Tensor y) => gen_math_ops.greater(x, y); | |||||
| public static Tensor operator >(int x, Tensor y) => gen_math_ops.greater(x, y); | |||||
| public static Tensor operator >(Tensor x, Tensor y) => gen_math_ops.greater(x, y); | |||||
| public static Tensor operator >(Tensor x, int y) => gen_math_ops.greater(x, y); | public static Tensor operator >(Tensor x, int y) => gen_math_ops.greater(x, y); | ||||
| public static Tensor operator >=(Tensor x, Tensor y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator >(Tensor x, float y) => gen_math_ops.greater(x, y); | public static Tensor operator >(Tensor x, float y) => gen_math_ops.greater(x, y); | ||||
| public static Tensor operator >(Tensor x, double y) => gen_math_ops.greater(x, y); | public static Tensor operator >(Tensor x, double y) => gen_math_ops.greater(x, y); | ||||
| public static Tensor operator <(double x, Tensor y) => gen_math_ops.less(x, y); | |||||
| public static Tensor operator <(float x, Tensor y) => gen_math_ops.less(x, y); | |||||
| public static Tensor operator <(int x, Tensor y) => gen_math_ops.less(x, y); | |||||
| public static Tensor operator <(Tensor x, Tensor y) => gen_math_ops.less(x, y); | |||||
| public static Tensor operator <(Tensor x, int y) => gen_math_ops.less(x, y); | public static Tensor operator <(Tensor x, int y) => gen_math_ops.less(x, y); | ||||
| public static Tensor operator <=(Tensor x, Tensor y) => gen_math_ops.less_equal(x, y); | |||||
| public static Tensor operator <(Tensor x, float y) => gen_math_ops.less(x, y); | public static Tensor operator <(Tensor x, float y) => gen_math_ops.less(x, y); | ||||
| public static Tensor operator <(Tensor x, double y) => gen_math_ops.less(x, y); | public static Tensor operator <(Tensor x, double y) => gen_math_ops.less(x, y); | ||||
| public static Tensor operator >=(double x, Tensor y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator >=(float x, Tensor y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator >=(int x, Tensor y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator >=(Tensor x, Tensor y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator >=(Tensor x, int y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator >=(Tensor x, float y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator >=(Tensor x, double y) => gen_math_ops.greater_equal(x, y); | |||||
| public static Tensor operator <=(int x, Tensor y) => gen_math_ops.less_equal(x, y); | |||||
| public static Tensor operator <=(float x, Tensor y) => gen_math_ops.less_equal(x, y); | |||||
| public static Tensor operator <=(double x, Tensor y) => gen_math_ops.less_equal(x, y); | |||||
| public static Tensor operator <=(Tensor x, Tensor y) => gen_math_ops.less_equal(x, y); | |||||
| public static Tensor operator <=(Tensor x, int y) => gen_math_ops.less_equal(x, y); | |||||
| public static Tensor operator <=(Tensor x, float y) => gen_math_ops.less_equal(x, y); | |||||
| public static Tensor operator <=(Tensor x, double y) => gen_math_ops.less_equal(x, y); | |||||
| private static Tensor BinaryOpWrapper<Tx, Ty>(string name, Tx x, Ty y) | private static Tensor BinaryOpWrapper<Tx, Ty>(string name, Tx x, Ty y) | ||||
| { | { | ||||
| TF_DataType dtype = TF_DataType.DtInvalid; | TF_DataType dtype = TF_DataType.DtInvalid; | ||||
| @@ -99,6 +134,9 @@ namespace Tensorflow | |||||
| case "add": | case "add": | ||||
| result = gen_math_ops.add(x1, y1, name: scope); | result = gen_math_ops.add(x1, y1, name: scope); | ||||
| break; | break; | ||||
| case "floordiv": | |||||
| result = gen_math_ops.floor_div(x1, y1, name: scope); | |||||
| break; | |||||
| case "truediv": | case "truediv": | ||||
| result = gen_math_ops.real_div(x1, y1, name: scope); | result = gen_math_ops.real_div(x1, y1, name: scope); | ||||
| break; | break; | ||||
| @@ -19,6 +19,7 @@ using System; | |||||
| using System.Collections.Generic; | using System.Collections.Generic; | ||||
| using System.Linq; | using System.Linq; | ||||
| using System.Runtime.InteropServices; | using System.Runtime.InteropServices; | ||||
| using System.Text; | |||||
| using Tensorflow.Framework; | using Tensorflow.Framework; | ||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| @@ -48,8 +49,6 @@ namespace Tensorflow | |||||
| private int _value_index; | private int _value_index; | ||||
| public int value_index => _value_index; | public int value_index => _value_index; | ||||
| private Status status = new Status(); | |||||
| private TF_DataType _dtype = TF_DataType.DtInvalid; | private TF_DataType _dtype = TF_DataType.DtInvalid; | ||||
| public TF_DataType dtype => _handle == IntPtr.Zero ? _dtype : c_api.TF_TensorType(_handle); | public TF_DataType dtype => _handle == IntPtr.Zero ? _dtype : c_api.TF_TensorType(_handle); | ||||
| @@ -76,6 +75,7 @@ namespace Tensorflow | |||||
| if (_handle == IntPtr.Zero) | if (_handle == IntPtr.Zero) | ||||
| { | { | ||||
| var status = new Status(); | |||||
| c_api.TF_GraphGetTensorShape(op.graph, _as_tf_output(), dims, rank, status); | c_api.TF_GraphGetTensorShape(op.graph, _as_tf_output(), dims, rank, status); | ||||
| status.Check(); | status.Check(); | ||||
| } | } | ||||
| @@ -90,6 +90,8 @@ namespace Tensorflow | |||||
| set | set | ||||
| { | { | ||||
| var status = new Status(); | |||||
| if (value == null) | if (value == null) | ||||
| c_api.TF_GraphSetTensorShape(this.graph, this._as_tf_output(), null, -1, status); | c_api.TF_GraphSetTensorShape(this.graph, this._as_tf_output(), null, -1, status); | ||||
| else | else | ||||
| @@ -131,8 +133,11 @@ namespace Tensorflow | |||||
| { | { | ||||
| if (_handle == IntPtr.Zero) | if (_handle == IntPtr.Zero) | ||||
| { | { | ||||
| var status = new Status(); | |||||
| var output = _as_tf_output(); | var output = _as_tf_output(); | ||||
| return c_api.TF_GraphGetTensorNumDims(op.graph, output, status); | |||||
| int ndim = c_api.TF_GraphGetTensorNumDims(op.graph, output, status); | |||||
| status.Check(); | |||||
| return ndim; | |||||
| } | } | ||||
| else | else | ||||
| { | { | ||||
| @@ -184,6 +189,41 @@ namespace Tensorflow | |||||
| return data; | return data; | ||||
| } | } | ||||
| public unsafe string[] StringData() | |||||
| { | |||||
| // | |||||
| // TF_STRING tensors are encoded with a table of 8-byte offsets followed by TF_StringEncode-encoded bytes. | |||||
| // [offset1, offset2,...,offsetn, s1size, s1bytes, s2size, s2bytes,...,snsize,snbytes] | |||||
| // | |||||
| long size = 1; | |||||
| foreach (var s in TensorShape.Dimensions) | |||||
| size *= s; | |||||
| var buffer = new byte[size][]; | |||||
| var src = c_api.TF_TensorData(_handle); | |||||
| var srcLen = (IntPtr)(src.ToInt64() + (long)bytesize); | |||||
| src += (int)(size * 8); | |||||
| for (int i = 0; i < buffer.Length; i++) | |||||
| { | |||||
| using (var status = new Status()) | |||||
| { | |||||
| IntPtr dst = IntPtr.Zero; | |||||
| UIntPtr dstLen = UIntPtr.Zero; | |||||
| var read = c_api.TF_StringDecode((byte*)src, (UIntPtr)(srcLen.ToInt64() - src.ToInt64()), (byte**)&dst, &dstLen, status); | |||||
| status.Check(true); | |||||
| buffer[i] = new byte[(int)dstLen]; | |||||
| Marshal.Copy(dst, buffer[i], 0, buffer[i].Length); | |||||
| src += (int)read; | |||||
| } | |||||
| } | |||||
| var _str = new string[buffer.Length]; | |||||
| for (int i = 0; i < _str.Length; i++) | |||||
| _str[i] = Encoding.UTF8.GetString(buffer[i]); | |||||
| return _str; | |||||
| } | |||||
| public Tensor MaybeMove() | public Tensor MaybeMove() | ||||
| { | { | ||||
| var tensor = c_api.TF_TensorMaybeMove(_handle); | var tensor = c_api.TF_TensorMaybeMove(_handle); | ||||
| @@ -356,15 +396,14 @@ namespace Tensorflow | |||||
| public void Dispose() | public void Dispose() | ||||
| { | { | ||||
| IntPtr h=IntPtr.Zero; | |||||
| IntPtr h = IntPtr.Zero; | |||||
| lock (this) | lock (this) | ||||
| { | { | ||||
| h = _handle; | h = _handle; | ||||
| _handle=IntPtr.Zero; | |||||
| _handle = IntPtr.Zero; | |||||
| } | } | ||||
| if (h != IntPtr.Zero) | if (h != IntPtr.Zero) | ||||
| c_api.TF_DeleteTensor(_handle); | |||||
| status.Dispose(); | |||||
| c_api.TF_DeleteTensor(h); | |||||
| GC.SuppressFinalize(this); | GC.SuppressFinalize(this); | ||||
| } | } | ||||
| @@ -32,6 +32,9 @@ namespace Tensorflow | |||||
| [DllImport(TensorFlowLibName)] | [DllImport(TensorFlowLibName)] | ||||
| public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, IntPtr dims, int num_dims, UIntPtr len); | public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, IntPtr dims, int num_dims, UIntPtr len); | ||||
| [DllImport(TensorFlowLibName)] | |||||
| public static extern IntPtr TF_AllocateTensor(TF_DataType dtype, long[] dims, int num_dims, UIntPtr len); | |||||
| /// <summary> | /// <summary> | ||||
| /// returns the sizeof() for the underlying type corresponding to the given TF_DataType enum value. | /// returns the sizeof() for the underlying type corresponding to the given TF_DataType enum value. | ||||
| /// </summary> | /// </summary> | ||||
| @@ -150,5 +153,8 @@ namespace Tensorflow | |||||
| /// <returns></returns> | /// <returns></returns> | ||||
| [DllImport(TensorFlowLibName)] | [DllImport(TensorFlowLibName)] | ||||
| public static extern ulong TF_StringDecode(IntPtr src, ulong src_len, IntPtr dst, ref ulong dst_len, IntPtr status); | public static extern ulong TF_StringDecode(IntPtr src, ulong src_len, IntPtr dst, ref ulong dst_len, IntPtr status); | ||||
| [DllImport(TensorFlowLibName)] | |||||
| public static extern unsafe UIntPtr TF_StringDecode(byte* src, UIntPtr src_len, byte** dst, UIntPtr* dst_len, IntPtr status); | |||||
| } | } | ||||
| } | } | ||||
| @@ -16,6 +16,8 @@ Here are some pre-built TensorFlow binaries you can use for each platform: | |||||
| - CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-1.14.0.zip | - CPU-only: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-cpu-windows-x86_64-1.14.0.zip | ||||
| - GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-1.14.0.zip | - GPU-enabled: https://storage.googleapis.com/tensorflow/libtensorflow/libtensorflow-gpu-windows-x86_64-1.14.0.zip | ||||
| ### Run in Linux | ### Run in Linux | ||||
| `Install-Package TensorFlow.NET` | `Install-Package TensorFlow.NET` | ||||
| @@ -31,10 +33,21 @@ sudo apt install libgdiplus | |||||
| More information about [System.Drawing on Linux](<https://www.hanselman.com/blog/HowDoYouUseSystemDrawingInNETCore.aspx>). | More information about [System.Drawing on Linux](<https://www.hanselman.com/blog/HowDoYouUseSystemDrawingInNETCore.aspx>). | ||||
| ### Run in Mac OS | ### Run in Mac OS | ||||
| ### GPU Tensorflow for windows | |||||
| Before running verify you installed CUDA and cuDNN | |||||
| ### Tensorflow GPU for Windows | |||||
| Before running verify you installed CUDA and cuDNN (TensorFlow v1.14 is compatible with CUDA v10.0 and cuDNN v7.4), and make sure the corresponding cuda version is compatible. | |||||
| ```powershell | |||||
| PM> Install-Package SciSharp.TensorFlow.Redist-Windows-GPU | |||||
| ``` | |||||
| ### Build from source for Windows | ### Build from source for Windows | ||||
| @@ -18,7 +18,7 @@ using NumSharp; | |||||
| using System; | using System; | ||||
| using System.Diagnostics; | using System.Diagnostics; | ||||
| using Tensorflow; | using Tensorflow; | ||||
| using TensorFlowNET.Examples.Utility; | |||||
| using Tensorflow.Hub; | |||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples | namespace TensorFlowNET.Examples | ||||
| @@ -39,7 +39,7 @@ namespace TensorFlowNET.Examples | |||||
| public int? test_size = null; | public int? test_size = null; | ||||
| public int batch_size = 1024; // The number of samples per batch | public int batch_size = 1024; // The number of samples per batch | ||||
| Datasets<DataSetMnist> mnist; | |||||
| Datasets<MnistDataSet> mnist; | |||||
| NDArray full_data_x; | NDArray full_data_x; | ||||
| int num_steps = 20; // Total steps to train | int num_steps = 20; // Total steps to train | ||||
| int k = 25; // The number of clusters | int k = 25; // The number of clusters | ||||
| @@ -62,19 +62,31 @@ namespace TensorFlowNET.Examples | |||||
| public void PrepareData() | public void PrepareData() | ||||
| { | { | ||||
| mnist = MNIST.read_data_sets("mnist", one_hot: true, train_size: train_size, validation_size:validation_size, test_size:test_size); | |||||
| full_data_x = mnist.train.data; | |||||
| var loader = new MnistModelLoader(); | |||||
| var setting = new ModelLoadSetting | |||||
| { | |||||
| TrainDir = ".resources/mnist", | |||||
| OneHot = true, | |||||
| TrainSize = train_size, | |||||
| ValidationSize = validation_size, | |||||
| TestSize = test_size | |||||
| }; | |||||
| mnist = loader.LoadAsync(setting).Result; | |||||
| full_data_x = mnist.Train.Data; | |||||
| // download graph meta data | // download graph meta data | ||||
| string url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/graph/kmeans.meta"; | string url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/graph/kmeans.meta"; | ||||
| Web.Download(url, "graph", "kmeans.meta"); | |||||
| loader.DownloadAsync(url, ".resources/graph", "kmeans.meta").Wait(); | |||||
| } | } | ||||
| public Graph ImportGraph() | public Graph ImportGraph() | ||||
| { | { | ||||
| var graph = tf.Graph().as_default(); | var graph = tf.Graph().as_default(); | ||||
| tf.train.import_meta_graph("graph/kmeans.meta"); | |||||
| tf.train.import_meta_graph(".resources/graph/kmeans.meta"); | |||||
| return graph; | return graph; | ||||
| } | } | ||||
| @@ -132,7 +144,7 @@ namespace TensorFlowNET.Examples | |||||
| sw.Start(); | sw.Start(); | ||||
| foreach (var i in range(idx.Length)) | foreach (var i in range(idx.Length)) | ||||
| { | { | ||||
| var x = mnist.train.labels[i]; | |||||
| var x = mnist.Train.Labels[i]; | |||||
| counts[idx[i]] += x; | counts[idx[i]] += x; | ||||
| } | } | ||||
| @@ -153,7 +165,7 @@ namespace TensorFlowNET.Examples | |||||
| var accuracy_op = tf.reduce_mean(cast); | var accuracy_op = tf.reduce_mean(cast); | ||||
| // Test Model | // Test Model | ||||
| var (test_x, test_y) = (mnist.test.data, mnist.test.labels); | |||||
| var (test_x, test_y) = (mnist.Test.Data, mnist.Test.Labels); | |||||
| result = sess.run(accuracy_op, new FeedItem(X, test_x), new FeedItem(Y, test_y)); | result = sess.run(accuracy_op, new FeedItem(X, test_x), new FeedItem(Y, test_y)); | ||||
| accuray_test = result; | accuray_test = result; | ||||
| print($"Test Accuracy: {accuray_test}"); | print($"Test Accuracy: {accuray_test}"); | ||||
| @@ -19,7 +19,7 @@ using System; | |||||
| using System.Diagnostics; | using System.Diagnostics; | ||||
| using System.IO; | using System.IO; | ||||
| using Tensorflow; | using Tensorflow; | ||||
| using TensorFlowNET.Examples.Utility; | |||||
| using Tensorflow.Hub; | |||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples | namespace TensorFlowNET.Examples | ||||
| @@ -45,7 +45,7 @@ namespace TensorFlowNET.Examples | |||||
| private float learning_rate = 0.01f; | private float learning_rate = 0.01f; | ||||
| private int display_step = 1; | private int display_step = 1; | ||||
| Datasets<DataSetMnist> mnist; | |||||
| Datasets<MnistDataSet> mnist; | |||||
| public bool Run() | public bool Run() | ||||
| { | { | ||||
| @@ -84,11 +84,11 @@ namespace TensorFlowNET.Examples | |||||
| sw.Start(); | sw.Start(); | ||||
| var avg_cost = 0.0f; | var avg_cost = 0.0f; | ||||
| var total_batch = mnist.train.num_examples / batch_size; | |||||
| var total_batch = mnist.Train.NumOfExamples / batch_size; | |||||
| // Loop over all batches | // Loop over all batches | ||||
| foreach (var i in range(total_batch)) | foreach (var i in range(total_batch)) | ||||
| { | { | ||||
| var (batch_xs, batch_ys) = mnist.train.next_batch(batch_size); | |||||
| var (batch_xs, batch_ys) = mnist.Train.GetNextBatch(batch_size); | |||||
| // Run optimization op (backprop) and cost op (to get loss value) | // Run optimization op (backprop) and cost op (to get loss value) | ||||
| var result = sess.run(new object[] { optimizer, cost }, | var result = sess.run(new object[] { optimizer, cost }, | ||||
| new FeedItem(x, batch_xs), | new FeedItem(x, batch_xs), | ||||
| @@ -115,7 +115,7 @@ namespace TensorFlowNET.Examples | |||||
| var correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)); | var correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)); | ||||
| // Calculate accuracy | // Calculate accuracy | ||||
| var accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)); | var accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)); | ||||
| float acc = accuracy.eval(new FeedItem(x, mnist.test.data), new FeedItem(y, mnist.test.labels)); | |||||
| float acc = accuracy.eval(new FeedItem(x, mnist.Test.Data), new FeedItem(y, mnist.Test.Labels)); | |||||
| print($"Accuracy: {acc.ToString("F4")}"); | print($"Accuracy: {acc.ToString("F4")}"); | ||||
| return acc > 0.9; | return acc > 0.9; | ||||
| @@ -124,23 +124,23 @@ namespace TensorFlowNET.Examples | |||||
| public void PrepareData() | public void PrepareData() | ||||
| { | { | ||||
| mnist = MNIST.read_data_sets("mnist", one_hot: true, train_size: train_size, validation_size: validation_size, test_size: test_size); | |||||
| mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, trainSize: train_size, validationSize: validation_size, testSize: test_size).Result; | |||||
| } | } | ||||
| public void SaveModel(Session sess) | public void SaveModel(Session sess) | ||||
| { | { | ||||
| var saver = tf.train.Saver(); | var saver = tf.train.Saver(); | ||||
| var save_path = saver.save(sess, "logistic_regression/model.ckpt"); | |||||
| tf.train.write_graph(sess.graph, "logistic_regression", "model.pbtxt", as_text: true); | |||||
| var save_path = saver.save(sess, ".resources/logistic_regression/model.ckpt"); | |||||
| tf.train.write_graph(sess.graph, ".resources/logistic_regression", "model.pbtxt", as_text: true); | |||||
| FreezeGraph.freeze_graph(input_graph: "logistic_regression/model.pbtxt", | |||||
| FreezeGraph.freeze_graph(input_graph: ".resources/logistic_regression/model.pbtxt", | |||||
| input_saver: "", | input_saver: "", | ||||
| input_binary: false, | input_binary: false, | ||||
| input_checkpoint: "logistic_regression/model.ckpt", | |||||
| input_checkpoint: ".resources/logistic_regression/model.ckpt", | |||||
| output_node_names: "Softmax", | output_node_names: "Softmax", | ||||
| restore_op_name: "save/restore_all", | restore_op_name: "save/restore_all", | ||||
| filename_tensor_name: "save/Const:0", | filename_tensor_name: "save/Const:0", | ||||
| output_graph: "logistic_regression/model.pb", | |||||
| output_graph: ".resources/logistic_regression/model.pb", | |||||
| clear_devices: true, | clear_devices: true, | ||||
| initializer_nodes: ""); | initializer_nodes: ""); | ||||
| } | } | ||||
| @@ -148,7 +148,7 @@ namespace TensorFlowNET.Examples | |||||
| public void Predict(Session sess) | public void Predict(Session sess) | ||||
| { | { | ||||
| var graph = new Graph().as_default(); | var graph = new Graph().as_default(); | ||||
| graph.Import(Path.Join("logistic_regression", "model.pb")); | |||||
| graph.Import(Path.Join(".resources/logistic_regression", "model.pb")); | |||||
| // restoring the model | // restoring the model | ||||
| // var saver = tf.train.import_meta_graph("logistic_regression/tensorflowModel.ckpt.meta"); | // var saver = tf.train.import_meta_graph("logistic_regression/tensorflowModel.ckpt.meta"); | ||||
| @@ -159,7 +159,7 @@ namespace TensorFlowNET.Examples | |||||
| var input = x.outputs[0]; | var input = x.outputs[0]; | ||||
| // predict | // predict | ||||
| var (batch_xs, batch_ys) = mnist.train.next_batch(10); | |||||
| var (batch_xs, batch_ys) = mnist.Train.GetNextBatch(10); | |||||
| var results = sess.run(output, new FeedItem(input, batch_xs[np.arange(1)])); | var results = sess.run(output, new FeedItem(input, batch_xs[np.arange(1)])); | ||||
| if (results.argmax() == (batch_ys[0] as NDArray).argmax()) | if (results.argmax() == (batch_ys[0] as NDArray).argmax()) | ||||
| @@ -17,7 +17,7 @@ | |||||
| using NumSharp; | using NumSharp; | ||||
| using System; | using System; | ||||
| using Tensorflow; | using Tensorflow; | ||||
| using TensorFlowNET.Examples.Utility; | |||||
| using Tensorflow.Hub; | |||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples | namespace TensorFlowNET.Examples | ||||
| @@ -31,7 +31,7 @@ namespace TensorFlowNET.Examples | |||||
| { | { | ||||
| public bool Enabled { get; set; } = true; | public bool Enabled { get; set; } = true; | ||||
| public string Name => "Nearest Neighbor"; | public string Name => "Nearest Neighbor"; | ||||
| Datasets<DataSetMnist> mnist; | |||||
| Datasets<MnistDataSet> mnist; | |||||
| NDArray Xtr, Ytr, Xte, Yte; | NDArray Xtr, Ytr, Xte, Yte; | ||||
| public int? TrainSize = null; | public int? TrainSize = null; | ||||
| public int ValidationSize = 5000; | public int ValidationSize = 5000; | ||||
| @@ -84,10 +84,10 @@ namespace TensorFlowNET.Examples | |||||
| public void PrepareData() | public void PrepareData() | ||||
| { | { | ||||
| mnist = MNIST.read_data_sets("mnist", one_hot: true, train_size: TrainSize, validation_size:ValidationSize, test_size:TestSize); | |||||
| mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, trainSize: TrainSize, validationSize: ValidationSize, testSize: TestSize).Result; | |||||
| // In this example, we limit mnist data | // In this example, we limit mnist data | ||||
| (Xtr, Ytr) = mnist.train.next_batch(TrainSize==null ? 5000 : TrainSize.Value / 100); // 5000 for training (nn candidates) | |||||
| (Xte, Yte) = mnist.test.next_batch(TestSize==null ? 200 : TestSize.Value / 100); // 200 for testing | |||||
| (Xtr, Ytr) = mnist.Train.GetNextBatch(TrainSize == null ? 5000 : TrainSize.Value / 100); // 5000 for training (nn candidates) | |||||
| (Xte, Yte) = mnist.Test.GetNextBatch(TestSize == null ? 200 : TestSize.Value / 100); // 200 for testing | |||||
| } | } | ||||
| public Graph ImportGraph() | public Graph ImportGraph() | ||||
| @@ -0,0 +1,74 @@ | |||||
| /***************************************************************************** | |||||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| you may not use this file except in compliance with the License. | |||||
| You may obtain a copy of the License at | |||||
| http://www.apache.org/licenses/LICENSE-2.0 | |||||
| Unless required by applicable law or agreed to in writing, software | |||||
| distributed under the License is distributed on an "AS IS" BASIS, | |||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| See the License for the specific language governing permissions and | |||||
| limitations under the License. | |||||
| ******************************************************************************/ | |||||
| using System; | |||||
| using System.Collections.Generic; | |||||
| using System.Text; | |||||
| using Tensorflow; | |||||
| using TensorFlowDatasets; | |||||
| using static Tensorflow.Python; | |||||
| namespace TensorFlowNET.Examples | |||||
| { | |||||
| /// <summary> | |||||
| /// https://www.tensorflow.org/tutorials/images/deep_cnn | |||||
| /// </summary> | |||||
| public class CIFAR10_CNN : IExample | |||||
| { | |||||
| public bool Enabled { get; set; } = true; | |||||
| public bool IsImportingGraph { get; set; } = false; | |||||
| public string Name => "CIFAR-10 CNN"; | |||||
| public bool Run() | |||||
| { | |||||
| PrepareData(); | |||||
| return true; | |||||
| } | |||||
| public Graph BuildGraph() | |||||
| { | |||||
| throw new NotImplementedException(); | |||||
| } | |||||
| public Graph ImportGraph() | |||||
| { | |||||
| throw new NotImplementedException(); | |||||
| } | |||||
| public void Predict(Session sess) | |||||
| { | |||||
| throw new NotImplementedException(); | |||||
| } | |||||
| public void PrepareData() | |||||
| { | |||||
| var tfds = new DatasetBuilder(); | |||||
| tfds.download_and_prepare(); | |||||
| } | |||||
| public void Test(Session sess) | |||||
| { | |||||
| throw new NotImplementedException(); | |||||
| } | |||||
| public void Train(Session sess) | |||||
| { | |||||
| throw new NotImplementedException(); | |||||
| } | |||||
| } | |||||
| } | |||||
| @@ -16,11 +16,12 @@ | |||||
| using NumSharp; | using NumSharp; | ||||
| using System; | using System; | ||||
| using System.Diagnostics; | |||||
| using Tensorflow; | using Tensorflow; | ||||
| using TensorFlowNET.Examples.Utility; | |||||
| using Tensorflow.Hub; | |||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples.ImageProcess | |||||
| namespace TensorFlowNET.Examples | |||||
| { | { | ||||
| /// <summary> | /// <summary> | ||||
| /// Convolutional Neural Network classifier for Hand Written Digits | /// Convolutional Neural Network classifier for Hand Written Digits | ||||
| @@ -45,7 +46,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| int epochs = 5; // accuracy > 98% | int epochs = 5; // accuracy > 98% | ||||
| int batch_size = 100; | int batch_size = 100; | ||||
| float learning_rate = 0.001f; | float learning_rate = 0.001f; | ||||
| Datasets<DataSetMnist> mnist; | |||||
| Datasets<MnistDataSet> mnist; | |||||
| // Network configuration | // Network configuration | ||||
| // 1st Convolutional Layer | // 1st Convolutional Layer | ||||
| @@ -144,6 +145,8 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| float loss_val = 100.0f; | float loss_val = 100.0f; | ||||
| float accuracy_val = 0f; | float accuracy_val = 0f; | ||||
| var sw = new Stopwatch(); | |||||
| sw.Start(); | |||||
| foreach (var epoch in range(epochs)) | foreach (var epoch in range(epochs)) | ||||
| { | { | ||||
| print($"Training epoch: {epoch + 1}"); | print($"Training epoch: {epoch + 1}"); | ||||
| @@ -165,7 +168,8 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_batch), new FeedItem(y, y_batch)); | var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, x_batch), new FeedItem(y, y_batch)); | ||||
| loss_val = result[0]; | loss_val = result[0]; | ||||
| accuracy_val = result[1]; | accuracy_val = result[1]; | ||||
| print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")}"); | |||||
| print($"iter {iteration.ToString("000")}: Loss={loss_val.ToString("0.0000")}, Training Accuracy={accuracy_val.ToString("P")} {sw.ElapsedMilliseconds}ms"); | |||||
| sw.Restart(); | |||||
| } | } | ||||
| } | } | ||||
| @@ -306,14 +310,14 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| public void PrepareData() | public void PrepareData() | ||||
| { | { | ||||
| mnist = MNIST.read_data_sets("mnist", one_hot: true); | |||||
| (x_train, y_train) = Reformat(mnist.train.data, mnist.train.labels); | |||||
| (x_valid, y_valid) = Reformat(mnist.validation.data, mnist.validation.labels); | |||||
| (x_test, y_test) = Reformat(mnist.test.data, mnist.test.labels); | |||||
| mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true).Result; | |||||
| (x_train, y_train) = Reformat(mnist.Train.Data, mnist.Train.Labels); | |||||
| (x_valid, y_valid) = Reformat(mnist.Validation.Data, mnist.Validation.Labels); | |||||
| (x_test, y_test) = Reformat(mnist.Test.Data, mnist.Test.Labels); | |||||
| print("Size of:"); | print("Size of:"); | ||||
| print($"- Training-set:\t\t{len(mnist.train.data)}"); | |||||
| print($"- Validation-set:\t{len(mnist.validation.data)}"); | |||||
| print($"- Training-set:\t\t{len(mnist.Train.Data)}"); | |||||
| print($"- Validation-set:\t{len(mnist.Validation.Data)}"); | |||||
| } | } | ||||
| /// <summary> | /// <summary> | ||||
| @@ -17,10 +17,10 @@ | |||||
| using NumSharp; | using NumSharp; | ||||
| using System; | using System; | ||||
| using Tensorflow; | using Tensorflow; | ||||
| using TensorFlowNET.Examples.Utility; | |||||
| using Tensorflow.Hub; | |||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples.ImageProcess | |||||
| namespace TensorFlowNET.Examples | |||||
| { | { | ||||
| /// <summary> | /// <summary> | ||||
| /// Neural Network classifier for Hand Written Digits | /// Neural Network classifier for Hand Written Digits | ||||
| @@ -44,7 +44,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| int batch_size = 100; | int batch_size = 100; | ||||
| float learning_rate = 0.001f; | float learning_rate = 0.001f; | ||||
| int h1 = 200; // number of nodes in the 1st hidden layer | int h1 = 200; // number of nodes in the 1st hidden layer | ||||
| Datasets<DataSetMnist> mnist; | |||||
| Datasets<MnistDataSet> mnist; | |||||
| Tensor x, y; | Tensor x, y; | ||||
| Tensor loss, accuracy; | Tensor loss, accuracy; | ||||
| @@ -121,13 +121,13 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| public void PrepareData() | public void PrepareData() | ||||
| { | { | ||||
| mnist = MNIST.read_data_sets("mnist", one_hot: true); | |||||
| mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true).Result; | |||||
| } | } | ||||
| public void Train(Session sess) | public void Train(Session sess) | ||||
| { | { | ||||
| // Number of training iterations in each epoch | // Number of training iterations in each epoch | ||||
| var num_tr_iter = mnist.train.labels.len / batch_size; | |||||
| var num_tr_iter = mnist.Train.Labels.len / batch_size; | |||||
| var init = tf.global_variables_initializer(); | var init = tf.global_variables_initializer(); | ||||
| sess.run(init); | sess.run(init); | ||||
| @@ -139,13 +139,13 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| { | { | ||||
| print($"Training epoch: {epoch + 1}"); | print($"Training epoch: {epoch + 1}"); | ||||
| // Randomly shuffle the training data at the beginning of each epoch | // Randomly shuffle the training data at the beginning of each epoch | ||||
| var (x_train, y_train) = randomize(mnist.train.data, mnist.train.labels); | |||||
| var (x_train, y_train) = mnist.Randomize(mnist.Train.Data, mnist.Train.Labels); | |||||
| foreach (var iteration in range(num_tr_iter)) | foreach (var iteration in range(num_tr_iter)) | ||||
| { | { | ||||
| var start = iteration * batch_size; | var start = iteration * batch_size; | ||||
| var end = (iteration + 1) * batch_size; | var end = (iteration + 1) * batch_size; | ||||
| var (x_batch, y_batch) = get_next_batch(x_train, y_train, start, end); | |||||
| var (x_batch, y_batch) = mnist.GetNextBatch(x_train, y_train, start, end); | |||||
| // Run optimization op (backprop) | // Run optimization op (backprop) | ||||
| sess.run(optimizer, new FeedItem(x, x_batch), new FeedItem(y, y_batch)); | sess.run(optimizer, new FeedItem(x, x_batch), new FeedItem(y, y_batch)); | ||||
| @@ -161,7 +161,8 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| } | } | ||||
| // Run validation after every epoch | // Run validation after every epoch | ||||
| var results1 = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.validation.data), new FeedItem(y, mnist.validation.labels)); | |||||
| var results1 = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.Validation.Data), new FeedItem(y, mnist.Validation.Labels)); | |||||
| loss_val = results1[0]; | loss_val = results1[0]; | ||||
| accuracy_val = results1[1]; | accuracy_val = results1[1]; | ||||
| print("---------------------------------------------------------"); | print("---------------------------------------------------------"); | ||||
| @@ -172,35 +173,12 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| public void Test(Session sess) | public void Test(Session sess) | ||||
| { | { | ||||
| var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.test.data), new FeedItem(y, mnist.test.labels)); | |||||
| var result = sess.run(new[] { loss, accuracy }, new FeedItem(x, mnist.Test.Data), new FeedItem(y, mnist.Test.Labels)); | |||||
| loss_test = result[0]; | loss_test = result[0]; | ||||
| accuracy_test = result[1]; | accuracy_test = result[1]; | ||||
| print("---------------------------------------------------------"); | print("---------------------------------------------------------"); | ||||
| print($"Test loss: {loss_test.ToString("0.0000")}, test accuracy: {accuracy_test.ToString("P")}"); | print($"Test loss: {loss_test.ToString("0.0000")}, test accuracy: {accuracy_test.ToString("P")}"); | ||||
| print("---------------------------------------------------------"); | print("---------------------------------------------------------"); | ||||
| } | } | ||||
| private (NDArray, NDArray) randomize(NDArray x, NDArray y) | |||||
| { | |||||
| var perm = np.random.permutation(y.shape[0]); | |||||
| np.random.shuffle(perm); | |||||
| return (mnist.train.data[perm], mnist.train.labels[perm]); | |||||
| } | |||||
| /// <summary> | |||||
| /// selects a few number of images determined by the batch_size variable (if you don't know why, read about Stochastic Gradient Method) | |||||
| /// </summary> | |||||
| /// <param name="x"></param> | |||||
| /// <param name="y"></param> | |||||
| /// <param name="start"></param> | |||||
| /// <param name="end"></param> | |||||
| /// <returns></returns> | |||||
| private (NDArray, NDArray) get_next_batch(NDArray x, NDArray y, int start, int end) | |||||
| { | |||||
| var x_batch = x[$"{start}:{end}"]; | |||||
| var y_batch = y[$"{start}:{end}"]; | |||||
| return (x_batch, y_batch); | |||||
| } | |||||
| } | } | ||||
| } | } | ||||
| @@ -17,10 +17,10 @@ | |||||
| using NumSharp; | using NumSharp; | ||||
| using System; | using System; | ||||
| using Tensorflow; | using Tensorflow; | ||||
| using TensorFlowNET.Examples.Utility; | |||||
| using Tensorflow.Hub; | |||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples.ImageProcess | |||||
| namespace TensorFlowNET.Examples | |||||
| { | { | ||||
| /// <summary> | /// <summary> | ||||
| /// Recurrent Neural Network for handwritten digits MNIST. | /// Recurrent Neural Network for handwritten digits MNIST. | ||||
| @@ -45,7 +45,7 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| int n_inputs = 28; | int n_inputs = 28; | ||||
| int n_outputs = 10; | int n_outputs = 10; | ||||
| Datasets<DataSetMnist> mnist; | |||||
| Datasets<MnistDataSet> mnist; | |||||
| Tensor x, y; | Tensor x, y; | ||||
| Tensor loss, accuracy, cls_prediction; | Tensor loss, accuracy, cls_prediction; | ||||
| @@ -143,15 +143,15 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| public void PrepareData() | public void PrepareData() | ||||
| { | { | ||||
| mnist = MNIST.read_data_sets("mnist", one_hot: true); | |||||
| (x_train, y_train) = (mnist.train.data, mnist.train.labels); | |||||
| (x_valid, y_valid) = (mnist.validation.data, mnist.validation.labels); | |||||
| (x_test, y_test) = (mnist.test.data, mnist.test.labels); | |||||
| mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true).Result; | |||||
| (x_train, y_train) = (mnist.Train.Data, mnist.Train.Labels); | |||||
| (x_valid, y_valid) = (mnist.Validation.Data, mnist.Validation.Labels); | |||||
| (x_test, y_test) = (mnist.Test.Data, mnist.Test.Labels); | |||||
| print("Size of:"); | print("Size of:"); | ||||
| print($"- Training-set:\t\t{len(mnist.train.data)}"); | |||||
| print($"- Validation-set:\t{len(mnist.validation.data)}"); | |||||
| print($"- Test-set:\t\t{len(mnist.test.data)}"); | |||||
| print($"- Training-set:\t\t{len(mnist.Train.Data)}"); | |||||
| print($"- Validation-set:\t{len(mnist.Validation.Data)}"); | |||||
| print($"- Test-set:\t\t{len(mnist.Test.Data)}"); | |||||
| } | } | ||||
| public Graph ImportGraph() => throw new NotImplementedException(); | public Graph ImportGraph() => throw new NotImplementedException(); | ||||
| @@ -4,7 +4,7 @@ using Tensorflow; | |||||
| using TensorFlowNET.Examples.Utility; | using TensorFlowNET.Examples.Utility; | ||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples.ImageProcess | |||||
| namespace TensorFlowNET.Examples | |||||
| { | { | ||||
| /// <summary> | /// <summary> | ||||
| /// This example removes the background from an input image. | /// This example removes the background from an input image. | ||||
| @@ -25,7 +25,7 @@ using Tensorflow; | |||||
| using TensorFlowNET.Examples.Utility; | using TensorFlowNET.Examples.Utility; | ||||
| using static Tensorflow.Python; | using static Tensorflow.Python; | ||||
| namespace TensorFlowNET.Examples.ImageProcess | |||||
| namespace TensorFlowNET.Examples | |||||
| { | { | ||||
| /// <summary> | /// <summary> | ||||
| /// In this tutorial, we will reuse the feature extraction capabilities from powerful image classifiers trained on ImageNet | /// In this tutorial, we will reuse the feature extraction capabilities from powerful image classifiers trained on ImageNet | ||||
| @@ -83,10 +83,10 @@ namespace TensorFlowNET.Examples.ImageProcess | |||||
| #region For debug purpose | #region For debug purpose | ||||
| // predict images | // predict images | ||||
| Predict(null); | |||||
| // Predict(null); | |||||
| // load saved pb and test new images. | // load saved pb and test new images. | ||||
| Test(null); | |||||
| // Test(null); | |||||
| #endregion | #endregion | ||||
| @@ -0,0 +1,24 @@ | |||||
| <Project Sdk="Microsoft.NET.Sdk"> | |||||
| <PropertyGroup> | |||||
| <OutputType>Exe</OutputType> | |||||
| <TargetFramework>netcoreapp2.2</TargetFramework> | |||||
| <GeneratePackageOnBuild>false</GeneratePackageOnBuild> | |||||
| </PropertyGroup> | |||||
| <ItemGroup> | |||||
| <PackageReference Include="Colorful.Console" Version="1.2.9" /> | |||||
| <PackageReference Include="Newtonsoft.Json" Version="12.0.2" /> | |||||
| <PackageReference Include="SciSharp.TensorFlow.Redist-Windows-GPU" Version="1.14.0" /> | |||||
| <PackageReference Include="SharpZipLib" Version="1.1.0" /> | |||||
| <PackageReference Include="System.Drawing.Common" Version="4.5.1" /> | |||||
| </ItemGroup> | |||||
| <ItemGroup> | |||||
| <ProjectReference Include="..\..\src\KerasNET.Core\Keras.Core.csproj" /> | |||||
| <ProjectReference Include="..\..\src\TensorFlowDatasets\TensorFlowDatasets.csproj" /> | |||||
| <ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" /> | |||||
| <ProjectReference Include="..\..\src\TensorFlowText\TensorFlowText.csproj" /> | |||||
| <ProjectReference Include="..\..\src\TensorFlowHub\TensorFlowHub.csproj" /> | |||||
| </ItemGroup> | |||||
| </Project> | |||||
| @@ -16,7 +16,9 @@ | |||||
| <ItemGroup> | <ItemGroup> | ||||
| <ProjectReference Include="..\..\src\KerasNET.Core\Keras.Core.csproj" /> | <ProjectReference Include="..\..\src\KerasNET.Core\Keras.Core.csproj" /> | ||||
| <ProjectReference Include="..\..\src\TensorFlowDatasets\TensorFlowDatasets.csproj" /> | |||||
| <ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" /> | <ProjectReference Include="..\..\src\TensorFlowNET.Core\TensorFlowNET.Core.csproj" /> | ||||
| <ProjectReference Include="..\..\src\TensorFlowText\TensorFlowText.csproj" /> | <ProjectReference Include="..\..\src\TensorFlowText\TensorFlowText.csproj" /> | ||||
| <ProjectReference Include="..\..\src\TensorFlowHub\TensorFlowHub.csproj" /> | |||||
| </ItemGroup> | </ItemGroup> | ||||
| </Project> | </Project> | ||||
| @@ -1,95 +0,0 @@ | |||||
| /***************************************************************************** | |||||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| you may not use this file except in compliance with the License. | |||||
| You may obtain a copy of the License at | |||||
| http://www.apache.org/licenses/LICENSE-2.0 | |||||
| Unless required by applicable law or agreed to in writing, software | |||||
| distributed under the License is distributed on an "AS IS" BASIS, | |||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| See the License for the specific language governing permissions and | |||||
| limitations under the License. | |||||
| ******************************************************************************/ | |||||
| using NumSharp; | |||||
| using Tensorflow; | |||||
| namespace TensorFlowNET.Examples.Utility | |||||
| { | |||||
| public class DataSetMnist : IDataSet | |||||
| { | |||||
| public int num_examples { get; } | |||||
| public int epochs_completed { get; private set; } | |||||
| public int index_in_epoch { get; private set; } | |||||
| public NDArray data { get; private set; } | |||||
| public NDArray labels { get; private set; } | |||||
| public DataSetMnist(NDArray images, NDArray labels, TF_DataType dtype, bool reshape) | |||||
| { | |||||
| num_examples = images.shape[0]; | |||||
| images = images.reshape(images.shape[0], images.shape[1] * images.shape[2]); | |||||
| images.astype(dtype.as_numpy_datatype()); | |||||
| images = np.multiply(images, 1.0f / 255.0f); | |||||
| labels.astype(dtype.as_numpy_datatype()); | |||||
| data = images; | |||||
| this.labels = labels; | |||||
| epochs_completed = 0; | |||||
| index_in_epoch = 0; | |||||
| } | |||||
| public (NDArray, NDArray) next_batch(int batch_size, bool fake_data = false, bool shuffle = true) | |||||
| { | |||||
| var start = index_in_epoch; | |||||
| // Shuffle for the first epoch | |||||
| if(epochs_completed == 0 && start == 0 && shuffle) | |||||
| { | |||||
| var perm0 = np.arange(num_examples); | |||||
| np.random.shuffle(perm0); | |||||
| data = data[perm0]; | |||||
| labels = labels[perm0]; | |||||
| } | |||||
| // Go to the next epoch | |||||
| if (start + batch_size > num_examples) | |||||
| { | |||||
| // Finished epoch | |||||
| epochs_completed += 1; | |||||
| // Get the rest examples in this epoch | |||||
| var rest_num_examples = num_examples - start; | |||||
| //var images_rest_part = _images[np.arange(start, _num_examples)]; | |||||
| //var labels_rest_part = _labels[np.arange(start, _num_examples)]; | |||||
| // Shuffle the data | |||||
| if (shuffle) | |||||
| { | |||||
| var perm = np.arange(num_examples); | |||||
| np.random.shuffle(perm); | |||||
| data = data[perm]; | |||||
| labels = labels[perm]; | |||||
| } | |||||
| start = 0; | |||||
| index_in_epoch = batch_size - rest_num_examples; | |||||
| var end = index_in_epoch; | |||||
| var images_new_part = data[np.arange(start, end)]; | |||||
| var labels_new_part = labels[np.arange(start, end)]; | |||||
| /*return (np.concatenate(new float[][] { images_rest_part.Data<float>(), images_new_part.Data<float>() }, axis: 0), | |||||
| np.concatenate(new float[][] { labels_rest_part.Data<float>(), labels_new_part.Data<float>() }, axis: 0));*/ | |||||
| return (images_new_part, labels_new_part); | |||||
| } | |||||
| else | |||||
| { | |||||
| index_in_epoch += batch_size; | |||||
| var end = index_in_epoch; | |||||
| return (data[np.arange(start, end)], labels[np.arange(start, end)]); | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| @@ -1,46 +0,0 @@ | |||||
| using NumSharp; | |||||
| namespace TensorFlowNET.Examples.Utility | |||||
| { | |||||
| public class Datasets<T> where T : IDataSet | |||||
| { | |||||
| private T _train; | |||||
| public T train => _train; | |||||
| private T _validation; | |||||
| public T validation => _validation; | |||||
| private T _test; | |||||
| public T test => _test; | |||||
| public Datasets(T train, T validation, T test) | |||||
| { | |||||
| _train = train; | |||||
| _validation = validation; | |||||
| _test = test; | |||||
| } | |||||
| public (NDArray, NDArray) Randomize(NDArray x, NDArray y) | |||||
| { | |||||
| var perm = np.random.permutation(y.shape[0]); | |||||
| np.random.shuffle(perm); | |||||
| return (x[perm], y[perm]); | |||||
| } | |||||
| /// <summary> | |||||
| /// selects a few number of images determined by the batch_size variable (if you don't know why, read about Stochastic Gradient Method) | |||||
| /// </summary> | |||||
| /// <param name="x"></param> | |||||
| /// <param name="y"></param> | |||||
| /// <param name="start"></param> | |||||
| /// <param name="end"></param> | |||||
| /// <returns></returns> | |||||
| public (NDArray, NDArray) GetNextBatch(NDArray x, NDArray y, int start, int end) | |||||
| { | |||||
| var x_batch = x[$"{start}:{end}"]; | |||||
| var y_batch = y[$"{start}:{end}"]; | |||||
| return (x_batch, y_batch); | |||||
| } | |||||
| } | |||||
| } | |||||
| @@ -1,10 +0,0 @@ | |||||
| using NumSharp; | |||||
| namespace TensorFlowNET.Examples.Utility | |||||
| { | |||||
| public interface IDataSet | |||||
| { | |||||
| NDArray data { get; } | |||||
| NDArray labels { get; } | |||||
| } | |||||
| } | |||||
| @@ -1,131 +0,0 @@ | |||||
| /***************************************************************************** | |||||
| Copyright 2018 The TensorFlow.NET Authors. All Rights Reserved. | |||||
| Licensed under the Apache License, Version 2.0 (the "License"); | |||||
| you may not use this file except in compliance with the License. | |||||
| You may obtain a copy of the License at | |||||
| http://www.apache.org/licenses/LICENSE-2.0 | |||||
| Unless required by applicable law or agreed to in writing, software | |||||
| distributed under the License is distributed on an "AS IS" BASIS, | |||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |||||
| See the License for the specific language governing permissions and | |||||
| limitations under the License. | |||||
| ******************************************************************************/ | |||||
| using NumSharp; | |||||
| using System; | |||||
| using System.IO; | |||||
| using Tensorflow; | |||||
| namespace TensorFlowNET.Examples.Utility | |||||
| { | |||||
public class MNIST
{
    private const string DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/";
    private const string TRAIN_IMAGES = "train-images-idx3-ubyte.gz";
    private const string TRAIN_LABELS = "train-labels-idx1-ubyte.gz";
    private const string TEST_IMAGES = "t10k-images-idx3-ubyte.gz";
    private const string TEST_LABELS = "t10k-labels-idx1-ubyte.gz";

    /// <summary>
    /// Downloads (if missing), extracts and parses the MNIST dataset from
    /// <paramref name="source_url"/> into <paramref name="train_dir"/>, then
    /// splits the training data into training and validation parts.
    /// </summary>
    /// <param name="train_dir">Directory used for download, extraction and parsing.</param>
    /// <param name="one_hot">When true, labels are returned one-hot encoded.</param>
    /// <param name="dtype">Element type passed through to <c>DataSetMnist</c>.</param>
    /// <param name="reshape">Reshape flag passed through to <c>DataSetMnist</c>.</param>
    /// <param name="validation_size">Number of leading training examples held out for validation.</param>
    /// <param name="train_size">Optional cap on the number of training examples read.</param>
    /// <param name="test_size">Optional cap on the number of test examples read.</param>
    /// <param name="source_url">Base URL of the gzipped IDX files.</param>
    /// <returns>Train / validation / test datasets.</returns>
    public static Datasets<DataSetMnist> read_data_sets(string train_dir,
        bool one_hot = false,
        TF_DataType dtype = TF_DataType.TF_FLOAT,
        bool reshape = true,
        int validation_size = 5000,
        int? train_size = null,
        int? test_size = null,
        string source_url = DEFAULT_SOURCE_URL)
    {
        if (train_size != null && validation_size >= train_size)
            throw new ArgumentException("Validation set should be smaller than training set");

        Web.Download(source_url + TRAIN_IMAGES, train_dir, TRAIN_IMAGES);
        Compress.ExtractGZip(Path.Join(train_dir, TRAIN_IMAGES), train_dir);
        var train_images = extract_images(Path.Join(train_dir, TRAIN_IMAGES.Split('.')[0]), limit: train_size);

        Web.Download(source_url + TRAIN_LABELS, train_dir, TRAIN_LABELS);
        Compress.ExtractGZip(Path.Join(train_dir, TRAIN_LABELS), train_dir);
        var train_labels = extract_labels(Path.Join(train_dir, TRAIN_LABELS.Split('.')[0]), one_hot: one_hot, limit: train_size);

        Web.Download(source_url + TEST_IMAGES, train_dir, TEST_IMAGES);
        Compress.ExtractGZip(Path.Join(train_dir, TEST_IMAGES), train_dir);
        var test_images = extract_images(Path.Join(train_dir, TEST_IMAGES.Split('.')[0]), limit: test_size);

        Web.Download(source_url + TEST_LABELS, train_dir, TEST_LABELS);
        Compress.ExtractGZip(Path.Join(train_dir, TEST_LABELS), train_dir);
        var test_labels = extract_labels(Path.Join(train_dir, TEST_LABELS.Split('.')[0]), one_hot: one_hot, limit: test_size);

        // The first `validation_size` examples become the validation set;
        // the remainder stays in the training set.
        int end = train_images.shape[0];
        var validation_images = train_images[np.arange(validation_size)];
        var validation_labels = train_labels[np.arange(validation_size)];
        train_images = train_images[np.arange(validation_size, end)];
        train_labels = train_labels[np.arange(validation_size, end)];

        var train = new DataSetMnist(train_images, train_labels, dtype, reshape);
        var validation = new DataSetMnist(validation_images, validation_labels, dtype, reshape);
        var test = new DataSetMnist(test_images, test_labels, dtype, reshape);

        return new Datasets<DataSetMnist>(train, validation, test);
    }

    /// <summary>
    /// Parses an extracted IDX3 image file (magic number 2051) into an NDArray
    /// of shape [num_images, rows, cols, 1], dtype uint8.
    /// </summary>
    /// <param name="file">Path to the extracted (un-gzipped) image file.</param>
    /// <param name="limit">Optional cap on the number of images read.</param>
    public static NDArray extract_images(string file, int? limit = null)
    {
        using (var bytestream = new FileStream(file, FileMode.Open))
        {
            var magic = _read32(bytestream);
            if (magic != 2051)
                throw new ValueError($"Invalid magic number {magic} in MNIST image file: {file}");
            var num_images = _read32(bytestream);
            num_images = limit == null ? num_images : Math.Min(num_images, (uint)limit);
            var rows = _read32(bytestream);
            var cols = _read32(bytestream);
            // BUGFIX: Stream.Read may return fewer bytes than requested;
            // _readBytes loops until the buffer is completely filled.
            var buf = _readBytes(bytestream, (int)(rows * cols * num_images));
            var data = np.frombuffer(buf, np.uint8);
            data = data.reshape((int)num_images, (int)rows, (int)cols, 1);
            return data;
        }
    }

    /// <summary>
    /// Parses an extracted IDX1 label file (magic number 2049) into a uint8
    /// NDArray, optionally one-hot encoded.
    /// </summary>
    /// <param name="file">Path to the extracted (un-gzipped) label file.</param>
    /// <param name="one_hot">When true, the labels are one-hot encoded.</param>
    /// <param name="num_classes">Width of the one-hot encoding.</param>
    /// <param name="limit">Optional cap on the number of labels read.</param>
    public static NDArray extract_labels(string file, bool one_hot = false, int num_classes = 10, int? limit = null)
    {
        using (var bytestream = new FileStream(file, FileMode.Open))
        {
            var magic = _read32(bytestream);
            if (magic != 2049)
                throw new ValueError($"Invalid magic number {magic} in MNIST label file: {file}");
            var num_items = _read32(bytestream);
            num_items = limit == null ? num_items : Math.Min(num_items, (uint)limit);
            // BUGFIX: read the label buffer fully instead of ignoring Read's return value.
            var buf = _readBytes(bytestream, (int)num_items);
            var labels = np.frombuffer(buf, np.uint8);
            if (one_hot)
                return dense_to_one_hot(labels, num_classes);
            return labels;
        }
    }

    // Converts a dense vector of byte class indices into a one-hot matrix
    // of shape [num_labels, num_classes] with 1.0 at each label's column.
    private static NDArray dense_to_one_hot(NDArray labels_dense, int num_classes)
    {
        var num_labels = labels_dense.shape[0];
        var index_offset = np.arange(num_labels) * num_classes;
        var labels_one_hot = np.zeros(num_labels, num_classes);

        for (int row = 0; row < num_labels; row++)
        {
            var col = labels_dense.Data<byte>(row);
            labels_one_hot.SetData(1.0, row, col);
        }

        return labels_one_hot;
    }

    // Reads a big-endian unsigned 32-bit integer (IDX header field) from the stream.
    private static uint _read32(FileStream bytestream)
    {
        // ">u4" = big-endian uint32, matching the IDX file format.
        var buffer = _readBytes(bytestream, sizeof(uint));
        return np.frombuffer(buffer, ">u4").Data<uint>(0);
    }

    // Reads exactly `count` bytes, looping over partial reads; throws if the
    // stream ends early (truncated/corrupt download).
    private static byte[] _readBytes(Stream stream, int count)
    {
        var buffer = new byte[count];
        int offset = 0;
        while (offset < count)
        {
            int read = stream.Read(buffer, offset, count - offset);
            if (read == 0)
                throw new EndOfStreamException($"Expected {count} bytes but only {offset} were available.");
            offset += read;
        }
        return buffer;
    }
}
| } | |||||
| @@ -467,5 +467,987 @@ namespace TensorFlowNET.UnitTest | |||||
| } | } | ||||
| #endregion | #endregion | ||||
| } | } | ||||
/// <summary>
/// Element-wise product of two equally sized int collections.
/// </summary>
/// <exception cref="ArgumentException">Thrown when the collections differ in size.</exception>
private IEnumerable<int> MultiplyArray(IReadOnlyCollection<int> first, IReadOnlyCollection<int> second)
{
    if (first.Count != second.Count)
        throw new ArgumentException("Arrays should be of equal size!");
    // Zip pairs the elements and disposes both enumerators even on exception,
    // unlike the previous manual enumerator handling which leaked on throw.
    return first.Zip(second, (x, y) => x * y).ToList();
}
/// <summary>
/// Element-wise product of two equally sized float collections.
/// </summary>
/// <exception cref="ArgumentException">Thrown when the collections differ in size.</exception>
private IEnumerable<float> MultiplyArray(IReadOnlyCollection<float> first, IReadOnlyCollection<float> second)
{
    if (first.Count != second.Count)
        throw new ArgumentException("Arrays should be of equal size!");
    // Zip pairs the elements and disposes both enumerators even on exception,
    // unlike the previous manual enumerator handling which leaked on throw.
    return first.Zip(second, (x, y) => x * y).ToList();
}
/// <summary>
/// Element-wise product of two equally sized double collections.
/// </summary>
/// <exception cref="ArgumentException">Thrown when the collections differ in size.</exception>
private IEnumerable<double> MultiplyArray(IReadOnlyCollection<double> first, IReadOnlyCollection<double> second)
{
    if (first.Count != second.Count)
        throw new ArgumentException("Arrays should be of equal size!");
    // Zip pairs the elements and disposes both enumerators even on exception,
    // unlike the previous manual enumerator handling which leaked on throw.
    return first.Zip(second, (x, y) => x * y).ToList();
}
[TestMethod]
public void mulOpTests()
{
    // Verifies tf.multiply and the overloaded `*` operator in all forms
    // (Tensor*Tensor, Tensor*scalar, scalar*Tensor) for int, float and double.
    const int rows = 2; // to avoid broadcasting effect
    const int cols = 10;

    #region intTest
    const int firstIntVal = 2;
    const int secondIntVal = 3;

    var firstIntFeed = Enumerable.Repeat(firstIntVal, rows * cols).ToArray();
    var secondIntFeed = Enumerable.Repeat(secondIntVal, rows * cols).ToArray();
    var intResult = MultiplyArray(firstIntFeed, secondIntFeed).Sum();

    var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols));
    var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols));
    var c = tf.reduce_sum(tf.reduce_sum(tf.multiply(a, b), 1));

    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResult);
    }
    // Testing `operator *(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(a * b, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResult);
    }
    // Testing `operator *(Tensor x, int y)
    c = tf.reduce_sum(tf.reduce_sum(a * secondIntVal, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResult);
    }
    // Testing `operator *(int x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(firstIntVal * b, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResult);
    }
    #endregion

    #region floatTest
    const float firstFloatVal = 2.0f;
    const float secondFloatVal = 3.0f;

    var firstFloatFeed = Enumerable.Repeat(firstFloatVal, rows * cols).ToArray();
    var secondFloatFeed = Enumerable.Repeat(secondFloatVal, rows * cols).ToArray();
    var floatResult = MultiplyArray(firstFloatFeed, secondFloatFeed).Sum();

    a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols));
    b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols));
    c = tf.reduce_sum(tf.reduce_sum(tf.multiply(a, b), 1));

    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((float)o, floatResult);
    }
    // Testing `operator *(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(a * b, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((float)o, floatResult);
    }
    // Testing `operator *(Tensor x, float y)
    c = tf.reduce_sum(tf.reduce_sum(a * secondFloatVal, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((float)o, floatResult);
    }
    // Testing `operator *(float x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(firstFloatVal * b, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((float)o, floatResult);
    }
    #endregion

    #region doubleTest
    const double firstDoubleVal = 2.0;
    const double secondDoubleVal = 3.0;

    var firstDoubleFeed = Enumerable.Repeat(firstDoubleVal, rows * cols).ToArray();
    var secondDoubleFeed = Enumerable.Repeat(secondDoubleVal, rows * cols).ToArray();
    var doubleResult = MultiplyArray(firstDoubleFeed, secondDoubleFeed).Sum();

    a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols));
    b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols));
    c = tf.reduce_sum(tf.reduce_sum(tf.multiply(a, b), 1));

    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((double)o, doubleResult);
    }
    // Testing `operator *(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(a * b, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((double)o, doubleResult);
    }
    // Testing `operator *(Tensor x, double y)
    // BUGFIX: previously multiplied by secondFloatVal, which exercised the
    // float overload instead of the double one this case is meant to cover.
    c = tf.reduce_sum(tf.reduce_sum(a * secondDoubleVal, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((double)o, doubleResult);
    }
    // Testing `operator *(double x, Tensor y)
    // BUGFIX: previously used firstFloatVal (float overload) — see above.
    c = tf.reduce_sum(tf.reduce_sum(firstDoubleVal * b, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((double)o, doubleResult);
    }
    #endregion
}
[TestMethod]
public void divOpTests()
{
    // Verifies division ops and the overloaded `/` operator in all forms
    // (Tensor/Tensor, Tensor/scalar, scalar/Tensor) for int, float and double.
    const int rows = 2; // to avoid broadcasting effect
    const int cols = 10;

    #region intTest
    const int firstIntVal = 6;
    const int secondIntVal = 3;

    var firstIntFeed = Enumerable.Repeat(firstIntVal, rows * cols).ToArray();
    var secondIntFeed = Enumerable.Repeat(secondIntVal, rows * cols).ToArray();
    var intResult = (int)(firstIntFeed.Sum() / (float)secondIntVal);

    var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols));
    var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols));
    var c = tf.reduce_sum(tf.reduce_sum(gen_math_ops.floor_div(a, b), 1));

    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResult);
    }
    // Testing `operator /(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(a / b, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResult);
    }
    // Testing `operator /(Tensor x, int y)
    c = tf.reduce_sum(tf.reduce_sum(a / secondIntVal, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResult);
    }
    // Testing `operator /(int x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(firstIntVal / b, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResult);
    }
    #endregion

    #region floatTest
    const float firstFloatVal = 6.0f;
    const float secondFloatVal = 3.0f;

    var firstFloatFeed = Enumerable.Repeat(firstFloatVal, rows * cols).ToArray();
    var secondFloatFeed = Enumerable.Repeat(secondFloatVal, rows * cols).ToArray();
    var floatResult = MultiplyArray(firstFloatFeed, secondFloatFeed.Select(x => 1 / x).ToArray()).Sum();

    a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols));
    b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols));
    c = tf.reduce_sum(tf.reduce_sum(tf.divide(a, b), 1));

    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((float)o, floatResult);
    }
    // Testing `operator /(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(a / b, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((float)o, floatResult);
    }
    // Testing `operator /(Tensor x, float y)
    c = tf.reduce_sum(tf.reduce_sum(a / secondFloatVal, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((float)o, floatResult);
    }
    // Testing `operator /(float x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(firstFloatVal / b, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((float)o, floatResult);
    }
    #endregion

    #region doubleTest
    const double firstDoubleVal = 6.0;
    const double secondDoubleVal = 3.0;

    var firstDoubleFeed = Enumerable.Repeat(firstDoubleVal, rows * cols).ToArray();
    var secondDoubleFeed = Enumerable.Repeat(secondDoubleVal, rows * cols).ToArray();
    var doubleResult = MultiplyArray(firstDoubleFeed, secondDoubleFeed.Select(x => 1 / x).ToArray()).Sum();

    a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols));
    b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols));
    c = tf.reduce_sum(tf.reduce_sum(tf.divide(a, b), 1));

    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((double)o, doubleResult);
    }
    // Testing `operator /(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(a / b, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((double)o, doubleResult);
    }
    // Testing `operator /(Tensor x, double y)
    // BUGFIX: previously divided by secondFloatVal, which exercised the
    // float overload instead of the double one this case is meant to cover.
    c = tf.reduce_sum(tf.reduce_sum(a / secondDoubleVal, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((double)o, doubleResult);
    }
    // Testing `operator /(double x, Tensor y)
    // BUGFIX: previously used firstFloatVal (float overload) — see above.
    c = tf.reduce_sum(tf.reduce_sum(firstDoubleVal / b, 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((double)o, doubleResult);
    }
    #endregion
}
[TestMethod]
public void greaterThanOpTests()
{
    // Verifies tf.greater and the overloaded `>` operator in all forms
    // (Tensor>Tensor, Tensor>scalar, scalar>Tensor) for int, float and double.
    const int rows = 2; // to avoid broadcasting effect
    const int cols = 10;

    #region intTest
    const int intThreshold = 10;

    var firstIntFeed = Enumerable.Range(0, rows * cols).ToArray();
    var secondIntFeed = Enumerable.Repeat(intThreshold, rows * cols).ToArray();
    var intResult = firstIntFeed.Count(elem => elem > intThreshold);
    var intResultTwo = firstIntFeed.Count(elem => elem < intThreshold);

    var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols));
    var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols));
    var c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater(a, b), tf.int32), 1));

    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResult);
    }
    // Testing `operator >(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > b, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResult);
    }
    // Testing `operator >(Tensor x, int y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > intThreshold, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResult);
    }
    // Testing `operator >(int x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(intThreshold > a, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResultTwo);
    }
    #endregion

    #region floatTest
    const float floatThreshold = 10.0f;

    var firstFloatFeed = Enumerable.Range(0, rows * cols).Select(elem => (float)elem).ToArray();
    var secondFloatFeed = Enumerable.Repeat(floatThreshold, rows * cols).ToArray();
    var floatResult = firstFloatFeed.Count(elem => elem > floatThreshold);
    var floatResultTwo = firstFloatFeed.Count(elem => elem < floatThreshold);

    a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols));
    b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols));
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater(a, b), tf.int32), 1));

    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, floatResult);
    }
    // Testing `operator >(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > b, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, floatResult);
    }
    // Testing `operator >(Tensor x, float y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > floatThreshold, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, floatResult);
    }
    // Testing `operator >(float x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(floatThreshold > a, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, floatResultTwo);
    }
    #endregion

    #region doubleTest
    const double doubleThreshold = 10.0;

    // BUGFIX: previously Enumerable.Repeat(0, ...) produced an all-zero feed,
    // making the double comparisons degenerate (expected counts were trivially
    // 0 / 20). Range matches the int and float regions.
    var firstDoubleFeed = Enumerable.Range(0, rows * cols).Select(elem => (double)elem).ToArray();
    var secondDoubleFeed = Enumerable.Repeat(doubleThreshold, rows * cols).ToArray();
    var doubleResult = firstDoubleFeed.Count(elem => elem > doubleThreshold);
    var doubleResultTwo = firstDoubleFeed.Count(elem => elem < doubleThreshold);

    a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols));
    b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols));
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater(a, b), tf.int32), 1));

    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, doubleResult);
    }
    // Testing `operator >(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > b, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, doubleResult);
    }
    // Testing `operator >(Tensor x, double y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a > doubleThreshold, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, doubleResult);
    }
    // Testing `operator >(double x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(doubleThreshold > a, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, doubleResultTwo);
    }
    #endregion
}
[TestMethod]
public void lessThanOpTests()
{
    // Verifies tf.less and the overloaded `<` operator in all forms
    // (Tensor<Tensor, Tensor<scalar, scalar<Tensor) for int, float and double.
    const int rows = 2; // to avoid broadcasting effect
    const int cols = 10;

    #region intTest
    const int intThreshold = 10;

    var firstIntFeed = Enumerable.Range(0, rows * cols).ToArray();
    var secondIntFeed = Enumerable.Repeat(intThreshold, rows * cols).ToArray();
    var intResult = firstIntFeed.Count(elem => elem < intThreshold);
    var intResultTwo = firstIntFeed.Count(elem => elem > intThreshold);

    var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols));
    var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols));
    var c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less(a, b), tf.int32), 1));

    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResult);
    }
    // Testing `operator <(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < b, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResult);
    }
    // Testing `operator <(Tensor x, int y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < intThreshold, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResult);
    }
    // Testing `operator <(int x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(intThreshold < a, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, intResultTwo);
    }
    #endregion

    #region floatTest
    const float floatThreshold = 10.0f;

    var firstFloatFeed = Enumerable.Range(0, rows * cols).Select(elem => (float)elem).ToArray();
    var secondFloatFeed = Enumerable.Repeat(floatThreshold, rows * cols).ToArray();
    var floatResult = firstFloatFeed.Count(elem => elem < floatThreshold);
    var floatResultTwo = firstFloatFeed.Count(elem => elem > floatThreshold);

    a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols));
    b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols));
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less(a, b), tf.int32), 1));

    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, floatResult);
    }
    // Testing `operator <(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < b, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, floatResult);
    }
    // Testing `operator <(Tensor x, float y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < floatThreshold, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, floatResult);
    }
    // Testing `operator <(float x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(floatThreshold < a, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, floatResultTwo);
    }
    #endregion

    #region doubleTest
    const double doubleThreshold = 10.0;

    // BUGFIX: previously Enumerable.Repeat(0, ...) produced an all-zero feed,
    // making the double comparisons degenerate (expected counts were trivially
    // 20 / 0). Range matches the int and float regions.
    var firstDoubleFeed = Enumerable.Range(0, rows * cols).Select(elem => (double)elem).ToArray();
    var secondDoubleFeed = Enumerable.Repeat(doubleThreshold, rows * cols).ToArray();
    var doubleResult = firstDoubleFeed.Count(elem => elem < doubleThreshold);
    var doubleResultTwo = firstDoubleFeed.Count(elem => elem > doubleThreshold);

    a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols));
    b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols));
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less(a, b), tf.int32), 1));

    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, doubleResult);
    }
    // Testing `operator <(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < b, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, doubleResult);
    }
    // Testing `operator <(Tensor x, double y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a < doubleThreshold, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, doubleResult);
    }
    // Testing `operator <(double x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(doubleThreshold < a, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual((int)o, doubleResultTwo);
    }
    #endregion
}
[TestMethod]
public void greaterOrEqualThanOpTests()
{
    // Verifies tf.greater_equal and every C# `>=` operator overload
    // (Tensor/Tensor, Tensor/scalar, scalar/Tensor) for int, float and double,
    // by comparing the graph-computed count of true elements against a count
    // computed on the host with LINQ.
    const int rows = 2; // to avoid broadcasting effect
    const int cols = 10;

    #region intTest
    const int intThreshold = 10;
    // First operand is 0..(rows*cols-1); second is a constant threshold matrix.
    var firstIntFeed = Enumerable.Range(0, rows * cols).ToArray();
    var secondIntFeed = Enumerable.Repeat(intThreshold, rows * cols).ToArray();
    // Host-side expected counts; `intResultTwo` covers the reversed operand order.
    var intResult = firstIntFeed.Count(elem => elem >= intThreshold);
    var intResultTwo = firstIntFeed.Count(elem => elem <= intThreshold);

    var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols));
    var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols));

    // Testing tf.greater_equal(Tensor x, Tensor y)
    var c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater_equal(a, b), tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols))));
        Assert.AreEqual(intResult, (int)o);
    }

    // Testing `operator >=(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a >= b, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols))));
        Assert.AreEqual(intResult, (int)o);
    }

    // Testing `operator >=(Tensor x, int y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a >= intThreshold, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))));
        Assert.AreEqual(intResult, (int)o);
    }

    // Testing `operator >=(int x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(intThreshold >= a, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))));
        Assert.AreEqual(intResultTwo, (int)o);
    }
    #endregion

    #region floatTest
    const float floatThreshold = 10.0f;
    var firstFloatFeed = Enumerable.Range(0, rows * cols).Select(elem => (float)elem).ToArray();
    var secondFloatFeed = Enumerable.Repeat(floatThreshold, rows * cols).ToArray();
    var floatResult = firstFloatFeed.Count(elem => elem >= floatThreshold);
    var floatResultTwo = firstFloatFeed.Count(elem => elem <= floatThreshold);

    a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols));
    b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols));

    // Testing tf.greater_equal(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater_equal(a, b), tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual(floatResult, (int)o);
    }

    // Testing `operator >=(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a >= b, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual(floatResult, (int)o);
    }

    // Testing `operator >=(Tensor x, float y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a >= floatThreshold, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual(floatResult, (int)o);
    }

    // Testing `operator >=(float x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(floatThreshold >= a, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual(floatResultTwo, (int)o);
    }
    #endregion

    #region doubleTest
    const double doubleThreshold = 10.0;
    // BUGFIX: was Enumerable.Repeat(0, ...), which produced an all-zero feed and
    // made the double-precision assertions trivially degenerate. Use Range to
    // match the int and float regions and actually exercise the threshold.
    var firstDoubleFeed = Enumerable.Range(0, rows * cols).Select(elem => (double)elem).ToArray();
    var secondDoubleFeed = Enumerable.Repeat(doubleThreshold, rows * cols).ToArray();
    var doubleResult = firstDoubleFeed.Count(elem => elem >= doubleThreshold);
    var doubleResultTwo = firstDoubleFeed.Count(elem => elem <= doubleThreshold);

    a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols));
    b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols));

    // Testing tf.greater_equal(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.greater_equal(a, b), tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual(doubleResult, (int)o);
    }

    // Testing `operator >=(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a >= b, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual(doubleResult, (int)o);
    }

    // Testing `operator >=(Tensor x, double y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a >= doubleThreshold, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual(doubleResult, (int)o);
    }

    // Testing `operator >=(double x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(doubleThreshold >= a, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual(doubleResultTwo, (int)o);
    }
    #endregion
}
[TestMethod]
public void lessOrEqualThanOpTests()
{
    // Verifies tf.less_equal and every C# `<=` operator overload
    // (Tensor/Tensor, Tensor/scalar, scalar/Tensor) for int, float and double,
    // by comparing the graph-computed count of true elements against a count
    // computed on the host with LINQ.
    const int rows = 2; // to avoid broadcasting effect
    const int cols = 10;

    #region intTest
    const int intThreshold = 10;
    // First operand is 0..(rows*cols-1); second is a constant threshold matrix.
    var firstIntFeed = Enumerable.Range(0, rows * cols).ToArray();
    var secondIntFeed = Enumerable.Repeat(intThreshold, rows * cols).ToArray();
    // Host-side expected counts; `intResultTwo` covers the reversed operand order.
    var intResult = firstIntFeed.Count(elem => elem <= intThreshold);
    var intResultTwo = firstIntFeed.Count(elem => elem >= intThreshold);

    var a = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols));
    var b = tf.placeholder(tf.int32, shape: new TensorShape(rows, cols));

    // Testing tf.less_equal(Tensor x, Tensor y)
    var c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less_equal(a, b), tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols))));
        Assert.AreEqual(intResult, (int)o);
    }

    // Testing `operator <=(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a <= b, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondIntFeed, new Shape(rows, cols))));
        Assert.AreEqual(intResult, (int)o);
    }

    // Testing `operator <=(Tensor x, int y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a <= intThreshold, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))));
        Assert.AreEqual(intResult, (int)o);
    }

    // Testing `operator <=(int x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(intThreshold <= a, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstIntFeed, new Shape(rows, cols))));
        Assert.AreEqual(intResultTwo, (int)o);
    }
    #endregion

    #region floatTest
    const float floatThreshold = 10.0f;
    var firstFloatFeed = Enumerable.Range(0, rows * cols).Select(elem => (float)elem).ToArray();
    var secondFloatFeed = Enumerable.Repeat(floatThreshold, rows * cols).ToArray();
    var floatResult = firstFloatFeed.Count(elem => elem <= floatThreshold);
    var floatResultTwo = firstFloatFeed.Count(elem => elem >= floatThreshold);

    a = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols));
    b = tf.placeholder(tf.float32, shape: new TensorShape(rows, cols));

    // Testing tf.less_equal(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less_equal(a, b), tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual(floatResult, (int)o);
    }

    // Testing `operator <=(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a <= b, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual(floatResult, (int)o);
    }

    // Testing `operator <=(Tensor x, float y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a <= floatThreshold, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual(floatResult, (int)o);
    }

    // Testing `operator <=(float x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(floatThreshold <= a, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstFloatFeed, new Shape(rows, cols))));
        Assert.AreEqual(floatResultTwo, (int)o);
    }
    #endregion

    #region doubleTest
    const double doubleThreshold = 10.0;
    // BUGFIX: was Enumerable.Repeat(0, ...), which produced an all-zero feed and
    // made the double-precision assertions trivially degenerate. Use Range to
    // match the int and float regions and actually exercise the threshold.
    var firstDoubleFeed = Enumerable.Range(0, rows * cols).Select(elem => (double)elem).ToArray();
    var secondDoubleFeed = Enumerable.Repeat(doubleThreshold, rows * cols).ToArray();
    var doubleResult = firstDoubleFeed.Count(elem => elem <= doubleThreshold);
    var doubleResultTwo = firstDoubleFeed.Count(elem => elem >= doubleThreshold);

    a = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols));
    b = tf.placeholder(tf.float64, shape: new TensorShape(rows, cols));

    // Testing tf.less_equal(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(tf.less_equal(a, b), tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual(doubleResult, (int)o);
    }

    // Testing `operator <=(Tensor x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a <= b, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))),
            new FeedItem(b, new NDArray(secondDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual(doubleResult, (int)o);
    }

    // Testing `operator <=(Tensor x, double y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(a <= doubleThreshold, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual(doubleResult, (int)o);
    }

    // Testing `operator <=(double x, Tensor y)
    c = tf.reduce_sum(tf.reduce_sum(tf.cast(doubleThreshold <= a, tf.int32), 1));
    using (var sess = tf.Session())
    {
        var o = sess.run(c,
            new FeedItem(a, new NDArray(firstDoubleFeed, new Shape(rows, cols))));
        Assert.AreEqual(doubleResultTwo, (int)o);
    }
    #endregion
}
| } | } | ||||
| } | } | ||||