| @@ -26,12 +26,6 @@ Global | |||
| Debug|Any CPU = Debug|Any CPU | |||
| Debug|x64 = Debug|x64 | |||
| Debug|x86 = Debug|x86 | |||
| Debug-Minimal|Any CPU = Debug-Minimal|Any CPU | |||
| Debug-Minimal|x64 = Debug-Minimal|x64 | |||
| Debug-Minimal|x86 = Debug-Minimal|x86 | |||
| Publish|Any CPU = Publish|Any CPU | |||
| Publish|x64 = Publish|x64 | |||
| Publish|x86 = Publish|x86 | |||
| Release|Any CPU = Release|Any CPU | |||
| Release|x64 = Release|x64 | |||
| Release|x86 = Release|x86 | |||
| @@ -43,18 +37,6 @@ Global | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x64.Build.0 = Debug|x64 | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.ActiveCfg = Debug|x64 | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x64.Build.0 = Debug|x64 | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.ActiveCfg = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|Any CPU.Build.0 = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.ActiveCfg = Release|x64 | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x64.Build.0 = Release|x64 | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x86.ActiveCfg = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Publish|x86.Build.0 = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {FD682AC0-7B2D-45D3-8B0D-C6D678B04144}.Release|x64.ActiveCfg = Release|x64 | |||
| @@ -67,18 +49,6 @@ Global | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x64.Build.0 = Debug|x64 | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.ActiveCfg = Debug|x64 | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x64.Build.0 = Debug|x64 | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.ActiveCfg = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|Any CPU.Build.0 = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.ActiveCfg = Release|x64 | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x64.Build.0 = Release|x64 | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x86.ActiveCfg = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Publish|x86.Build.0 = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {3A6EB896-604F-4E25-B677-B8103BCF3D2E}.Release|x64.ActiveCfg = Release|x64 | |||
| @@ -91,18 +61,6 @@ Global | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x64.Build.0 = Debug|x64 | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.ActiveCfg = Debug|x64 | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x64.Build.0 = Debug|x64 | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.ActiveCfg = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|Any CPU.Build.0 = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.ActiveCfg = Release|x64 | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x64.Build.0 = Release|x64 | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x86.ActiveCfg = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Publish|x86.Build.0 = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {23C28035-2FCE-41F3-9A12-E73CE8A5AE32}.Release|x64.ActiveCfg = Release|x64 | |||
| @@ -115,18 +73,6 @@ Global | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x64.Build.0 = Debug|x64 | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|Any CPU.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|x64.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|x64.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|x86.ActiveCfg = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Publish|x86.Build.0 = Debug|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {03F06299-3F4B-4449-A709-3A647657BC0C}.Release|x64.ActiveCfg = Release|Any CPU | |||
| @@ -139,18 +85,6 @@ Global | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|x64.Build.0 = Debug|x64 | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug-Minimal|x64.ActiveCfg = Debug|x64 | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug-Minimal|x64.Build.0 = Debug|x64 | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Publish|Any CPU.ActiveCfg = Release|Any CPU | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Publish|Any CPU.Build.0 = Release|Any CPU | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Publish|x64.ActiveCfg = Debug|x64 | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Publish|x64.Build.0 = Debug|x64 | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Publish|x86.ActiveCfg = Release|Any CPU | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Publish|x86.Build.0 = Release|Any CPU | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {49D71826-C03D-4FA7-9BAC-22C1327E65CF}.Release|x64.ActiveCfg = Release|x64 | |||
| @@ -163,18 +97,6 @@ Global | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|x64.Build.0 = Debug|x64 | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Publish|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Publish|Any CPU.Build.0 = Debug|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Publish|x64.ActiveCfg = Debug|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Publish|x64.Build.0 = Debug|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Publish|x86.ActiveCfg = Debug|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Publish|x86.Build.0 = Debug|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {1AB8108D-4FFE-4A16-88E7-328EAF686370}.Release|x64.ActiveCfg = Release|Any CPU | |||
| @@ -187,18 +109,6 @@ Global | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|x64.Build.0 = Debug|x64 | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Publish|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Publish|Any CPU.Build.0 = Debug|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Publish|x64.ActiveCfg = Debug|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Publish|x64.Build.0 = Debug|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Publish|x86.ActiveCfg = Debug|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Publish|x86.Build.0 = Debug|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {F17AAECB-960A-4E18-A270-BAD776F0E55B}.Release|x64.ActiveCfg = Release|Any CPU | |||
| @@ -211,18 +121,6 @@ Global | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|x64.Build.0 = Debug|x64 | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Publish|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Publish|Any CPU.Build.0 = Debug|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Publish|x64.ActiveCfg = Debug|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Publish|x64.Build.0 = Debug|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Publish|x86.ActiveCfg = Debug|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Publish|x86.Build.0 = Debug|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {84CA35F8-99FC-408E-8DF3-5AA175E5EFD3}.Release|x64.ActiveCfg = Release|Any CPU | |||
| @@ -235,18 +133,6 @@ Global | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|x64.Build.0 = Debug|x64 | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|x86.ActiveCfg = Debug|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug|x86.Build.0 = Debug|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug-Minimal|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug-Minimal|Any CPU.Build.0 = Debug|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug-Minimal|x64.ActiveCfg = Debug|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug-Minimal|x64.Build.0 = Debug|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug-Minimal|x86.ActiveCfg = Debug|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Debug-Minimal|x86.Build.0 = Debug|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Publish|Any CPU.ActiveCfg = Debug|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Publish|Any CPU.Build.0 = Debug|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Publish|x64.ActiveCfg = Debug|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Publish|x64.Build.0 = Debug|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Publish|x86.ActiveCfg = Debug|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Publish|x86.Build.0 = Debug|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|Any CPU.ActiveCfg = Release|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|Any CPU.Build.0 = Release|Any CPU | |||
| {79EB56DF-E29E-4AE2-A7D9-FE403FD919BA}.Release|x64.ActiveCfg = Release|Any CPU | |||
| @@ -0,0 +1,64 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.IO; | |||
| using System.Text; | |||
| using System.Linq; | |||
| using static Tensorflow.Binding; | |||
| using System.Text.RegularExpressions; | |||
| namespace Tensorflow | |||
| { | |||
| public class Diagnostician | |||
| { | |||
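| /// <summary> | |||
| /// Scans a TRACK_TENSOR_LIFE log and prints every "New"/"Take"/"Created" allocation line | |||
| /// that never gets a matching "Delete"/"Deleted" line later in the file, i.e. a likely leak. | |||
| /// </summary> | |||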
| public void Diagnose(string log) | |||
| { | |||
| var lines = File.ReadAllLines(log); | |||
| foreach(var (i, line) in enumerate(lines)) | |||
| { | |||
| if(line.StartsWith("New Tensor ")) | |||
| { | |||
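| // a "New Tensor" line carries two pointers: the tensor handle followed by the tensor data handle | |||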
| var pointers = Regex.Matches(line, "0x[0-9a-f]{16}"); | |||
| var tensorHandle = pointers[0].Value; | |||
| var tensorDataHandle = pointers[1].Value; | |||
| if (lines.Skip(i).Count(x => x.StartsWith("Delete Tensor ") | |||
| && x.Contains(tensorHandle) | |||
| && x.Contains(tensorDataHandle)) == 0) | |||
| Console.WriteLine(line); | |||
| } | |||
| else if (line.StartsWith("New EagerTensorHandle ")) | |||
| { | |||
| var pointers = Regex.Matches(line, "0x[0-9a-f]{16}"); | |||
| var tensorHandle = pointers[0].Value; | |||
| var del = $"Delete EagerTensorHandle {tensorHandle}"; | |||
| if (lines.Skip(i).Count(x => x == del) == 0) | |||
| Console.WriteLine(line); | |||
| } | |||
| else if (line.StartsWith("Take EagerTensorHandle ")) | |||
| { | |||
| var pointers = Regex.Matches(line, "0x[0-9a-f]{16}"); | |||
| var eagerTensorHandle = pointers[0].Value; | |||
| var tensorHandle = pointers[1].Value; | |||
| var delTensor = $"Delete Tensor {tensorHandle}"; | |||
| var delEagerTensor = $"Delete EagerTensorHandle {eagerTensorHandle}"; | |||
| if (lines.Skip(i).Count(x => x.StartsWith(delTensor)) == 0 | |||
| || lines.Skip(i).Count(x => x.StartsWith(delEagerTensor)) == 0) | |||
| Console.WriteLine(line); | |||
| } | |||
| else if (line.StartsWith("Created Resource ")) | |||
| { | |||
| var pointers = Regex.Matches(line, "0x[0-9a-f]{16}"); | |||
| var eagerTensorHandle = pointers[0].Value; | |||
| var delTensor = $"Deleted Resource {eagerTensorHandle}"; | |||
| if (lines.Skip(i).Count(x => x.StartsWith(delTensor)) == 0) | |||
| Console.WriteLine(line); | |||
| } | |||
| } | |||
| } | |||
| } | |||
| } | |||
| @@ -0,0 +1,152 @@ | |||
| using NumSharp; | |||
| using System; | |||
| using Tensorflow.Keras.ArgsDefinition; | |||
| using Tensorflow.Keras.Engine.DataAdapters; | |||
| using static Tensorflow.Binding; | |||
| using static Tensorflow.KerasApi; | |||
| namespace Tensorflow | |||
| { | |||
| class MemoryBasicTest | |||
| { | |||
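| // each case is an Action<int, int> taking (epoch, iterate) indices; MemoryMonitor.Execute runs them repeatedly while tracking process memory | |||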
| public Action<int, int> Placeholder | |||
| => (epoch, iterate) => | |||
| { | |||
| var ph = array_ops.placeholder(tf.float32, (10, 512, 512, 3)); | |||
| }; | |||
| /// <summary> | |||
| /// Creates a scalar constant tensor each call; used to watch for leaked tensor handles. | |||
| /// </summary> | |||
| public Action<int, int> Constant | |||
| => (epoch, iterate) => | |||
| { | |||
| var tensor = tf.constant(3112.0f); | |||
| }; | |||
| public Action<int, int> Constant2x3 | |||
| => (epoch, iterate) => | |||
| { | |||
| var nd = np.arange(1000).reshape(10, 100); | |||
| var tensor = tf.constant(nd); | |||
| var data = tensor.numpy(); | |||
| }; | |||
| public Action<int, int> ConstantString | |||
| => (epoch, iterate) => | |||
| { | |||
| var tensor = tf.constant(new string[] | |||
| { | |||
| "Biden immigration bill would put millions of illegal immigrants on 8-year fast-track to citizenship", | |||
| "The Associated Press, which also reported that the eight-year path is in the bill.", | |||
| "The bill would also include provisions to stem the flow of migration by addressing root causes of migration from south of the border." | |||
| }); | |||
| var data = tensor.numpy(); | |||
| }; | |||
| public Action<int, int> Variable | |||
| => (epoch, iterate) => | |||
| { | |||
| var nd = np.arange(1 * 256 * 256 * 3).reshape(1, 256, 256, 3); | |||
| ResourceVariable variable = tf.Variable(nd); | |||
| }; | |||
| public Action<int, int> VariableRead | |||
| => (epoch, iterate) => | |||
| { | |||
| var nd = np.zeros(1 * 256 * 256 * 3).astype(np.float32).reshape(1, 256, 256, 3); | |||
| ResourceVariable variable = tf.Variable(nd); | |||
| var nd2 = np.arange(1 * 256 * 256 * 3).astype(np.float32).reshape(1, 256, 256, 3); | |||
| variable.assign(nd2); | |||
| for (int i = 0; i< 100; i++) | |||
| { | |||
| var v = variable.numpy(); | |||
| } | |||
| }; | |||
| public Action<int, int> MathAdd | |||
| => (epoch, iterate) => | |||
| { | |||
| var x = tf.constant(3112.0f); | |||
| var y = tf.constant(3112.0f); | |||
| var z = x + y; | |||
| }; | |||
| public Action<int, int> Gradient | |||
| => (epoch, iterate) => | |||
| { | |||
| var w = tf.constant(3112.0f); | |||
| using var tape = tf.GradientTape(); | |||
| tape.watch(w); | |||
| var loss = w * w; | |||
| var grad = tape.gradient(loss, w); | |||
| }; | |||
| public Action<int, int> Conv2DWithTensor | |||
| => (epoch, iterate) => | |||
| { | |||
| var input = array_ops.zeros((10, 32, 32, 3), dtypes.float32); | |||
| var filter = array_ops.zeros((3, 3, 3, 32), dtypes.float32); | |||
| var strides = new[] { 1, 1, 1, 1 }; | |||
| var dilations = new[] { 1, 1, 1, 1 }; | |||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||
| "Conv2D", null, | |||
| null, | |||
| input, filter, | |||
| "strides", strides, | |||
| "use_cudnn_on_gpu", true, | |||
| "padding", "VALID", | |||
| "explicit_paddings", new int[0], | |||
| "data_format", "NHWC", | |||
| "dilations", dilations); | |||
| }; | |||
| public Action<int, int> Conv2DWithVariable | |||
| => (epoch, iterate) => | |||
| { | |||
| var input = array_ops.zeros((10, 32, 32, 3), dtypes.float32); | |||
| var filter = tf.Variable(array_ops.zeros((3, 3, 3, 32), dtypes.float32)); | |||
| var strides = new[] { 1, 1, 1, 1 }; | |||
| var dilations = new[] { 1, 1, 1, 1 }; | |||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||
| "Conv2D", null, | |||
| null, | |||
| input, filter, | |||
| "strides", strides, | |||
| "use_cudnn_on_gpu", true, | |||
| "padding", "VALID", | |||
| "explicit_paddings", new int[0], | |||
| "data_format", "NHWC", | |||
| "dilations", dilations); | |||
| }; | |||
| public Action<int, int> Dataset | |||
| => (epoch, iterate) => | |||
| { | |||
| TensorShape shape = (16, 32, 32, 3); | |||
| var images = np.arange(shape.size).astype(np.float32).reshape(shape.dims); | |||
| var data_handler = new DataHandler(new DataHandlerArgs | |||
| { | |||
| X = images, | |||
| BatchSize = 2, | |||
| StepsPerEpoch = -1, | |||
| InitialEpoch = 0, | |||
| Epochs = 2, | |||
| MaxQueueSize = 10, | |||
| Workers = 1, | |||
| UseMultiprocessing = false, | |||
| StepsPerExecution = tf.Variable(1) | |||
| }); | |||
| /*foreach (var (_epoch, iterator) in data_handler.enumerate_epochs()) | |||
| { | |||
| foreach (var step in data_handler.steps()) | |||
| iterator.next(); | |||
| }*/ | |||
| }; | |||
| } | |||
| } | |||
| @@ -0,0 +1,30 @@ | |||
| using NumSharp; | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using Tensorflow.Functions; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| { | |||
| class MemoryFuncGraphTest | |||
| { | |||
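| // builds a named ConcreteFunction graph each iteration to check that graph and function handles are released | |||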
| public Action<int, int> ConcreteFunction | |||
| => (epoch, iterate) => | |||
| { | |||
| var func = new ConcreteFunction(Guid.NewGuid().ToString()); | |||
| func.Enter(); | |||
| var input = tf.placeholder(tf.float32); | |||
| var output = permutation(input); | |||
| func.ToGraph(input, output); | |||
| func.Exit(); | |||
| }; | |||
| Tensor permutation(Tensor tensor) | |||
| { | |||
| TensorShape shape = (8, 64, 64, 3); | |||
| var images = np.arange(shape.size).astype(np.float32).reshape(shape.dims); | |||
| return tf.constant(images); | |||
| } | |||
| } | |||
| } | |||
| @@ -0,0 +1,51 @@ | |||
| using NumSharp; | |||
| using System; | |||
| using static Tensorflow.Binding; | |||
| using static Tensorflow.KerasApi; | |||
| namespace Tensorflow | |||
| { | |||
| class MemoryKerasTest | |||
| { | |||
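| // Keras layer, input and prediction cases; MemoryMonitor.Execute clears the Keras session between epochs | |||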
| public Action<int, int> Conv2DLayer | |||
| => (epoch, iterate) => | |||
| { | |||
| var input_shape = new int[] { 4, 512, 512, 3 }; | |||
| var x = tf.random.normal(input_shape); | |||
| var conv2d = keras.layers.Conv2D(2, 3, activation: keras.activations.Relu); | |||
| var output = conv2d.Apply(x); | |||
| }; | |||
| public Action<int, int> InputLayer | |||
| => (epoch, iterate) => | |||
| { | |||
| TensorShape shape = (32, 256, 256, 3); // 48M | |||
| var images = np.arange(shape.size).astype(np.float32).reshape(shape.dims); | |||
| var inputs = keras.Input((shape.dims[1], shape.dims[2], 3)); | |||
| var conv2d = keras.layers.Conv2D(32, kernel_size: (3, 3), | |||
| activation: keras.activations.Linear); | |||
| var outputs = conv2d.Apply(inputs); | |||
| }; | |||
| public Action<int, int> Prediction | |||
| => (epoch, iterate) => | |||
| { | |||
| TensorShape shape = (32, 256, 256, 3); // 48M | |||
| var images = np.arange(shape.size).astype(np.float32).reshape(shape.dims); | |||
| var inputs = keras.Input((shape.dims[1], shape.dims[2], 3)); | |||
| var conv2d = keras.layers.Conv2D(32, kernel_size: (3, 3), | |||
| activation: keras.activations.Linear).Apply(inputs); | |||
| var flatten = keras.layers.Flatten().Apply(inputs); | |||
| var outputs = keras.layers.Dense(10).Apply(flatten); | |||
| var model = keras.Model(inputs, outputs, "prediction"); | |||
| for (int i = 0; i < 10; i++) | |||
| { | |||
| model.predict(images, batch_size: 8); | |||
| } | |||
| }; | |||
| } | |||
| } | |||
| @@ -4,6 +4,7 @@ using System.Threading; | |||
| using System.Threading.Tasks; | |||
| using NumSharp; | |||
| using static Tensorflow.Binding; | |||
| using static Tensorflow.KerasApi; | |||
| namespace Tensorflow | |||
| { | |||
| @@ -11,38 +12,44 @@ namespace Tensorflow | |||
| { | |||
| public void WarmUp() | |||
| { | |||
| TensorShape shape = (1, 32, 32, 3); | |||
| np.arange(shape.size).astype(np.float32).reshape(shape.dims); | |||
| print($"tensorflow native version: v{tf.VERSION}"); | |||
| tf.Context.ensure_initialized(); | |||
| var a = tf.constant(np.ones(10, 10)); | |||
| var b = tf.Variable(a); | |||
| var c = tf.Variable(b); | |||
| var d = b * c; | |||
| print(d.numpy()); | |||
| GC.WaitForPendingFinalizers(); | |||
| GC.Collect(); | |||
| Thread.Sleep(1000); | |||
| GC.WaitForPendingFinalizers(); | |||
| } | |||
| public void Execute(int epoch, int iterate, Action<int> process) | |||
| public void Execute(int epoch, int iterate, Action<int, int> process) | |||
| { | |||
| GC.Collect(); | |||
| GC.WaitForPendingFinalizers(); | |||
| var initialTotalMemory = Process.GetCurrentProcess().PrivateMemorySize64; | |||
| print($"{process.Method.Name} started..."); | |||
| // new thread to run | |||
| Task.Run(() => | |||
| for (int i = 0; i < epoch; i++) | |||
| { | |||
| for (int i = 0; i < epoch; i++) | |||
| { | |||
| var initialMemory = Process.GetCurrentProcess().PrivateMemorySize64;// GC.GetTotalMemory(true); | |||
| process(iterate); | |||
| var finalMemory = Process.GetCurrentProcess().PrivateMemorySize64; //GC.GetTotalMemory(true); | |||
| print($"Epoch {i}: {Format(finalMemory - initialMemory)}."); | |||
| GC.Collect(); | |||
| GC.WaitForPendingFinalizers(); | |||
| } | |||
| }).Wait(); | |||
| print($"Total {process.Method.Name} usage {Format(Process.GetCurrentProcess().PrivateMemorySize64)}"); | |||
| var initialMemory = Process.GetCurrentProcess().PrivateMemorySize64; | |||
| for (int j = 0; j < iterate; j++) | |||
| process(i, j); | |||
| keras.backend.clear_session(); | |||
| GC.Collect(); | |||
| GC.WaitForPendingFinalizers(); | |||
| var finalMemory = Process.GetCurrentProcess().PrivateMemorySize64; | |||
| print($"Epoch {i}: {Format(finalMemory - initialMemory)}."); | |||
| } | |||
| var finalTotalMemory = Process.GetCurrentProcess().PrivateMemorySize64; | |||
| print($"Memory usage difference: {Format(finalTotalMemory - initialTotalMemory)} / {Format(Process.GetCurrentProcess().PrivateMemorySize64)}"); | |||
| } | |||
| private string Format(long usage) | |||
| @@ -1,113 +0,0 @@ | |||
| using NumSharp; | |||
| using System; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| { | |||
| class MemoryTestingCases | |||
| { | |||
| /// <summary> | |||
| /// | |||
| /// </summary> | |||
| public Action<int> Constant | |||
| => (iterate) => | |||
| { | |||
| for (int i = 0; i < iterate; i++) | |||
| { | |||
| var tensor = tf.constant(3112.0f); | |||
| } | |||
| }; | |||
| public Action<int> Constant2x3 | |||
| => (iterate) => | |||
| { | |||
| var nd = np.arange(1000).reshape(10, 100); | |||
| for (int i = 0; i < iterate; i++) | |||
| { | |||
| var tensor = tf.constant(nd); | |||
| var data = tensor.numpy(); | |||
| } | |||
| }; | |||
| public Action<int> Variable | |||
| => (iterate) => | |||
| { | |||
| for (int i = 0; i < iterate; i++) | |||
| { | |||
| var nd = np.arange(128 * 128 * 3).reshape(128, 128, 3); | |||
| var variable = tf.Variable(nd); | |||
| } | |||
| }; | |||
| public Action<int> MathAdd | |||
| => (iterate) => | |||
| { | |||
| var x = tf.constant(3112.0f); | |||
| var y = tf.constant(3112.0f); | |||
| for (int i = 0; i < iterate; i++) | |||
| { | |||
| var z = x + y; | |||
| } | |||
| }; | |||
| public Action<int> Gradient | |||
| => (iterate) => | |||
| { | |||
| for (int i = 0; i < iterate; i++) | |||
| { | |||
| var w = tf.constant(3112.0f); | |||
| using var tape = tf.GradientTape(); | |||
| tape.watch(w); | |||
| var loss = w * w; | |||
| var grad = tape.gradient(loss, w); | |||
| } | |||
| }; | |||
| public Action<int> Conv2dWithVariable | |||
| => (iterate) => | |||
| { | |||
| for (int i = 0; i < iterate; i++) | |||
| { | |||
| var input = array_ops.zeros((10, 32, 32, 3), dtypes.float32); | |||
| var filter = tf.Variable(array_ops.zeros((3, 3, 3, 32), dtypes.float32)); | |||
| var strides = new[] { 1, 1, 1, 1 }; | |||
| var dilations = new[] { 1, 1, 1, 1 }; | |||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||
| "Conv2D", null, | |||
| null, | |||
| input, filter, | |||
| "strides", strides, | |||
| "use_cudnn_on_gpu", true, | |||
| "padding", "VALID", | |||
| "explicit_paddings", new int[0], | |||
| "data_format", "NHWC", | |||
| "dilations", dilations); | |||
| } | |||
| }; | |||
| public Action<int> Conv2dWithTensor | |||
| => (iterate) => | |||
| { | |||
| for (int i = 0; i < iterate; i++) | |||
| { | |||
| var input = array_ops.zeros((10, 32, 32, 3), dtypes.float32); | |||
| var filter = array_ops.zeros((3, 3, 3, 32), dtypes.float32); | |||
| var strides = new[] { 1, 1, 1, 1 }; | |||
| var dilations = new[] { 1, 1, 1, 1 }; | |||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||
| "Conv2D", null, | |||
| null, | |||
| input, filter, | |||
| "strides", strides, | |||
| "use_cudnn_on_gpu", true, | |||
| "padding", "VALID", | |||
| "explicit_paddings", new int[0], | |||
| "data_format", "NHWC", | |||
| "dilations", dilations); | |||
| } | |||
| }; | |||
| } | |||
| } | |||
| @@ -1,4 +1,5 @@ | |||
| using System; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| { | |||
| @@ -6,6 +7,9 @@ namespace Tensorflow | |||
| { | |||
| static void Main(string[] args) | |||
| { | |||
| var diag = new Diagnostician(); | |||
| // diag.Diagnose(@"D:\memory.txt"); | |||
| // this class is used to explore new features. | |||
| var exploring = new Exploring(); | |||
| // exploring.Run(); | |||
| @@ -14,34 +18,72 @@ namespace Tensorflow | |||
| var mm = new MemoryMonitor(); | |||
| // warm up TensorFlow.NET, ~37.3M. | |||
| mm.WarmUp(); | |||
| var cases = new MemoryTestingCases(); | |||
| BasicTest(mm); | |||
| KerasTest(mm); | |||
| FuncGraph(mm); | |||
| // 85M | |||
| Console.WriteLine("Finished."); | |||
| Console.ReadLine(); | |||
| } | |||
| static void BasicTest(MemoryMonitor mm) | |||
| { | |||
| int batchSize = 1000; | |||
| var basic = new MemoryBasicTest(); | |||
| // 1 million placeholder | |||
| /*tf.compat.v1.disable_eager_execution(); | |||
| mm.Execute(10, 100 * batchSize, basic.Placeholder); | |||
| tf.enable_eager_execution();*/ | |||
| // 1 million tensor | |||
| mm.Execute(10, 100 * batchSize, cases.Constant); | |||
| mm.Execute(10, 100 * batchSize, basic.Constant); | |||
| // constant created from an NDArray (Constant2x3) | |||
| mm.Execute(10, 100 * batchSize, cases.Constant2x3); | |||
| mm.Execute(10, 100 * batchSize, basic.Constant2x3); | |||
| mm.Execute(10, 100 * batchSize, basic.ConstantString); | |||
| // +0M | |||
| mm.Execute(10, batchSize, cases.Conv2dWithTensor); | |||
| // 100K float variable. | |||
| mm.Execute(10, batchSize, basic.Variable); | |||
| // 100K float variable 84M. | |||
| mm.Execute(10, batchSize, cases.Variable); | |||
| // 1 million math. | |||
| mm.Execute(10, 100 * batchSize, basic.MathAdd); | |||
| // +45M memory leak | |||
| mm.Execute(10, batchSize, cases.Conv2dWithVariable); | |||
| // Conv2D with a constant tensor filter | |||
| mm.Execute(10, batchSize, basic.Conv2DWithTensor); | |||
| // 1 million math add 39M. | |||
| mm.Execute(10, 100 * batchSize, cases.MathAdd); | |||
| // Conv2D with a variable filter | |||
| mm.Execute(10, batchSize, basic.Conv2DWithVariable); | |||
| // 100K gradient 44M. | |||
| mm.Execute(10, 10 * batchSize, cases.Gradient); | |||
| mm.Execute(10, 10 * batchSize, basic.Gradient); | |||
| // 95M | |||
| Console.WriteLine("Finished."); | |||
| Console.ReadLine(); | |||
| // memory leak shows up when the epoch count is increased | |||
| mm.Execute(10, 10, basic.Dataset); | |||
| } | |||
| static void KerasTest(MemoryMonitor mm) | |||
| { | |||
| var keras = new MemoryKerasTest(); | |||
| // +1M (10,50) | |||
| mm.Execute(10, 1, keras.Conv2DLayer); | |||
| mm.Execute(10, 50, keras.InputLayer); | |||
| mm.Execute(10, 10, keras.Prediction); | |||
| } | |||
| static void FuncGraph(MemoryMonitor mm) | |||
| { | |||
| var func = new MemoryFuncGraphTest(); | |||
| mm.Execute(10, 100, func.ConcreteFunction); | |||
| } | |||
| } | |||
| } | |||
| @@ -6,6 +6,7 @@ | |||
| <RootNamespace>Tensorflow</RootNamespace> | |||
| <AssemblyName>Tensorflow</AssemblyName> | |||
| <Platforms>AnyCPU;x64</Platforms> | |||
| <LangVersion>9.0</LangVersion> | |||
| </PropertyGroup> | |||
| <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|AnyCPU'"> | |||
| @@ -4,7 +4,6 @@ namespace Tensorflow | |||
| { | |||
| public static partial class Binding | |||
| { | |||
| [DebuggerHidden] | |||
| public static tensorflow tf { get; } = New<tensorflow>(); | |||
| /// <summary> | |||
| @@ -81,7 +81,7 @@ namespace Tensorflow.Contexts | |||
| /// Checks whether the current thread has eager execution enabled. | |||
| /// </summary> | |||
| /// <returns></returns> | |||
| // [DebuggerStepThrough] | |||
| [DebuggerStepThrough] | |||
| public bool executing_eagerly() | |||
| { | |||
| if(context_switches.Count() == 0) | |||
| @@ -136,8 +136,11 @@ namespace Tensorflow.Contexts | |||
| public void reset_context() | |||
| { | |||
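| // reset graph and uid state and clear the native context caches so repeated test runs start clean | |||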
| ops.reset_uid(); | |||
| // tf.defaultSession = null; | |||
| ops.reset_default_graph(); | |||
| context_switches.Clear(); | |||
| tf.Context.ensure_initialized(); | |||
| if (_handle != null) | |||
| c_api.TFE_ContextClearCaches(_handle); | |||
| } | |||
| @@ -1,6 +0,0 @@ | |||
| namespace Tensorflow | |||
| { | |||
| public class IteratorBase | |||
| { | |||
| } | |||
| } | |||
| @@ -1,26 +0,0 @@ | |||
| using System; | |||
| namespace Tensorflow | |||
| { | |||
| /// <summary> | |||
| /// An object which cleans up an iterator resource handle. | |||
| /// </summary> | |||
| public class IteratorResourceDeleter : IDisposable | |||
| { | |||
| Tensor _handle; | |||
| Tensor _deleter; | |||
| dataset_ops ops; | |||
| public IteratorResourceDeleter(Tensor handle, Tensor deleter) | |||
| { | |||
| _handle = handle; | |||
| _deleter = deleter; | |||
| ops = new dataset_ops(); | |||
| } | |||
| public void Dispose() | |||
| { | |||
| ops.delete_iterator(_handle, _deleter); | |||
| } | |||
| } | |||
| } | |||
| @@ -15,7 +15,7 @@ namespace Tensorflow | |||
| bool preserve_cardinality = false, | |||
| bool use_legacy_function = false) : base(input_dataset) | |||
| { | |||
| using var func = new ConcreteFunction($"{map_func.Method.Name}_{Guid.NewGuid()}"); | |||
| var func = new ConcreteFunction($"{map_func.Method.Name}_{Guid.NewGuid()}"); | |||
| func.Enter(); | |||
| var input = tf.placeholder(input_dataset.element_spec[0].dtype); | |||
| var output = map_func(input); | |||
| @@ -7,8 +7,6 @@ namespace Tensorflow | |||
| /// </summary> | |||
| public class OptimizeDataset : UnaryUnchangedStructureDataset | |||
| { | |||
| Tensor _optimizations; | |||
| public OptimizeDataset(IDatasetV2 dataset, | |||
| string[] optimizations = null, | |||
| string[] optimization_configs = null) : | |||
| @@ -19,7 +17,7 @@ namespace Tensorflow | |||
| if (optimization_configs == null) | |||
| optimization_configs = new string[0]; | |||
| _optimizations = tf.convert_to_tensor(optimizations, dtype: TF_DataType.TF_STRING, name: "optimizations"); | |||
| var _optimizations = tf.convert_to_tensor(optimizations, dtype: TF_DataType.TF_STRING, name: "optimizations"); | |||
| variant_tensor = ops.optimize_dataset( | |||
| _input_dataset.variant_tensor, | |||
| _optimizations, | |||
| @@ -8,14 +8,13 @@ namespace Tensorflow | |||
| /// <summary> | |||
| /// An iterator producing tf.Tensor objects from a tf.data.Dataset. | |||
| /// </summary> | |||
| public class OwnedIterator : IteratorBase, IDisposable | |||
| public class OwnedIterator : IDisposable | |||
| { | |||
| IDatasetV2 _dataset; | |||
| TensorSpec[] _element_spec; | |||
| dataset_ops ops = new dataset_ops(); | |||
| Tensor _iterator_resource; | |||
| Tensor _deleter; | |||
| IteratorResourceDeleter _resource_deleter; | |||
| Tensor _iterator_resource; | |||
| public OwnedIterator(IDatasetV2 dataset) | |||
| { | |||
| @@ -29,9 +28,6 @@ namespace Tensorflow | |||
| _element_spec = dataset.element_spec; | |||
| (_iterator_resource, _deleter) = ops.anonymous_iterator_v2(_dataset.output_types, _dataset.output_shapes); | |||
| ops.make_iterator(dataset.variant_tensor, _iterator_resource); | |||
| // Delete the resource when this object is deleted | |||
| _resource_deleter = new IteratorResourceDeleter(_iterator_resource, _deleter); | |||
| } | |||
| public Tensor[] next() | |||
| @@ -51,7 +47,7 @@ namespace Tensorflow | |||
| public void Dispose() | |||
| { | |||
| _resource_deleter.Dispose(); | |||
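| // delete the native iterator directly; the separate IteratorResourceDeleter wrapper has been removed | |||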
| tf.Runner.Execute(tf.Context, "DeleteIterator", 0, new[] { _iterator_resource, _deleter }, null); | |||
| } | |||
| } | |||
| } | |||
| @@ -7,17 +7,15 @@ namespace Tensorflow | |||
| /// </summary> | |||
| public class PrefetchDataset : UnaryUnchangedStructureDataset | |||
| { | |||
| Tensor _buffer_size; | |||
| public PrefetchDataset(IDatasetV2 input_dataset, | |||
| long buffer_size = -1, | |||
| int? slack_period = null) : | |||
| base(input_dataset) | |||
| { | |||
| _buffer_size = tf.convert_to_tensor(buffer_size, dtype: TF_DataType.TF_INT64, name: "buffer_size"); | |||
| var buffer_size_tensor = tf.convert_to_tensor(buffer_size, dtype: TF_DataType.TF_INT64, name: "buffer_size"); | |||
| variant_tensor = ops.prefetch_dataset(input_dataset.variant_tensor, | |||
| _buffer_size, | |||
| buffer_size_tensor, | |||
| input_dataset.output_types, | |||
| input_dataset.output_shapes, | |||
| slack_period: slack_period); | |||
| @@ -5,21 +5,17 @@ namespace Tensorflow.Data | |||
| { | |||
| public class RangeDataset : DatasetSource | |||
| { | |||
| Tensor start; | |||
| Tensor step; | |||
| Tensor stop; | |||
| public RangeDataset(int stop, | |||
| int start = 0, | |||
| int step = 1, | |||
| TF_DataType output_type = TF_DataType.TF_INT64) | |||
| { | |||
| this.start = tf.convert_to_tensor((long)start); | |||
| this.step = tf.convert_to_tensor((long)step); | |||
| this.stop = tf.convert_to_tensor((long)stop); | |||
| var start_tensor = tf.convert_to_tensor((long)start); | |||
| var step_tensor = tf.convert_to_tensor((long)step); | |||
| var stop_tensor = tf.convert_to_tensor((long)stop); | |||
| structure = new TensorSpec[] { new TensorSpec(new int[0], dtype: output_type) }; | |||
| variant_tensor = ops.range_dataset(this.start, this.stop, this.step, output_types, output_shapes); | |||
| variant_tensor = ops.range_dataset(start_tensor, stop_tensor, step_tensor, output_types, output_shapes); | |||
| } | |||
| } | |||
| } | |||
| @@ -5,14 +5,12 @@ | |||
| /// </summary> | |||
| public class RepeatDataset : UnaryUnchangedStructureDataset | |||
| { | |||
| Tensor _count; | |||
| public RepeatDataset(IDatasetV2 input_dataset, int count = -1) : | |||
| base(input_dataset) | |||
| { | |||
| _count = constant_op.constant(count, dtype: TF_DataType.TF_INT64, name: "count"); | |||
| var count_tensor = constant_op.constant(count, dtype: TF_DataType.TF_INT64, name: "count"); | |||
| variant_tensor = ops.repeat_dataset(input_dataset.variant_tensor, | |||
| _count, | |||
| count_tensor, | |||
| input_dataset.output_types, | |||
| input_dataset.output_shapes); | |||
| } | |||
| @@ -42,8 +42,7 @@ namespace Tensorflow.Eager | |||
| int num_outputs) | |||
| { | |||
| var status = tf.Status; | |||
| var op = GetOp(ctx, op_name, status); | |||
| status.Check(true); | |||
| using var op = GetOp(ctx, op_name, status); | |||
| c_api.TFE_OpSetDevice(op, device_name, status.Handle); | |||
| if (status.ok()) | |||
| { | |||
| @@ -67,7 +66,7 @@ namespace Tensorflow.Eager | |||
| c_api.TFE_Execute(op, outputs, out num_outputs, status.Handle); | |||
| status.Check(true); | |||
| } | |||
| return outputs.Select(x => new EagerTensor(x, op)).ToArray(); | |||
| return outputs.Select(x => new EagerTensor(x)).ToArray(); | |||
| } | |||
| } | |||
| } | |||
| @@ -46,7 +46,7 @@ namespace Tensorflow.Eager | |||
| op_exec_info.run_callbacks = op_exec_info.run_gradient_callback || op_exec_info.run_post_exec_callbacks; | |||
| var status = tf.Status; | |||
| var op = GetOp(ctx, opName, status); | |||
| using var op = GetOp(ctx, opName, status); | |||
| var op_def = tf.get_default_graph().GetOpDef(opName); | |||
| @@ -158,7 +158,8 @@ namespace Tensorflow.Eager | |||
| c_api.TFE_Execute(op, retVals, out num_retvals, status.Handle); | |||
| status.Check(true); | |||
| var flat_result = retVals.Select(x => new EagerTensor(x, op)).ToArray(); | |||
| var flat_result = retVals.Select(x => new EagerTensor(x)).ToArray(); | |||
| if (op_exec_info.run_callbacks) | |||
| { | |||
| @@ -183,9 +184,7 @@ namespace Tensorflow.Eager | |||
| status.Check(true); | |||
| return op;*/ | |||
| var op = c_api.TFE_NewOp(ctx.Handle, op_or_function_name, status.Handle); | |||
| #if TRACK_TENSOR_LIFE | |||
| print($"New OpHandle 0x{op.DangerousGetHandle().ToString("x16")}"); | |||
| #endif | |||
| status.Check(true); | |||
| return op; | |||
| } | |||
| @@ -1,57 +1,56 @@ | |||
| using NumSharp; | |||
| using System; | |||
| using System.Linq; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow.Eager | |||
| { | |||
| public partial class EagerTensor | |||
| { | |||
| SafeOpHandle _opHandle; | |||
| public EagerTensor(SafeTensorHandleHandle handle, SafeOpHandle opHandle) : base(IntPtr.Zero) | |||
| public EagerTensor(SafeTensorHandleHandle handle) : base(IntPtr.Zero) | |||
| { | |||
| _opHandle = opHandle; | |||
| _id = ops.uid(); | |||
| EagerTensorHandle = handle; | |||
| Resolve(); | |||
| } | |||
| public EagerTensor(string value, string device_name) : base(value) | |||
| { | |||
| SetEagerTensorHandleAndResolve(); | |||
| NewEagerTensorHandle(_handle); | |||
| } | |||
| public EagerTensor(byte[] value, string device_name, TF_DataType dtype) : base(value, dType: dtype) | |||
| { | |||
| SetEagerTensorHandleAndResolve(); | |||
| NewEagerTensorHandle(_handle); | |||
| } | |||
| public EagerTensor(string[] value, string device_name) : base(value) | |||
| { | |||
| SetEagerTensorHandleAndResolve(); | |||
| NewEagerTensorHandle(_handle); | |||
| } | |||
| public EagerTensor(NDArray value, string device_name) : base(value) | |||
| { | |||
| SetEagerTensorHandleAndResolve(); | |||
| NewEagerTensorHandle(_handle); | |||
| } | |||
| void SetEagerTensorHandleAndResolve() | |||
| void NewEagerTensorHandle(IntPtr h) | |||
| { | |||
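| // wrap the native TF_Tensor pointer in a new eager tensor handle (logged when TRACK_TENSOR_LIFE is defined) | |||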
| EagerTensorHandle = c_api.TFE_NewTensorHandle(_handle, tf.Status.Handle); | |||
| Resolve(); | |||
| _id = ops.uid(); | |||
| EagerTensorHandle = c_api.TFE_NewTensorHandle(h, tf.Status.Handle); | |||
| tf.Status.Check(true); | |||
| #if TRACK_TENSOR_LIFE | |||
| print($"New EagerTensorHandle {EagerTensorHandle} {Id} From 0x{h.ToString("x16")}"); | |||
| #endif | |||
| } | |||
| public EagerTensor Resolve() | |||
| private void Resolve() | |||
| { | |||
| _id = ops.uid(); | |||
| if (_handle == IntPtr.Zero) | |||
| _handle = c_api.TFE_TensorHandleResolve(EagerTensorHandle, tf.Status.Handle); | |||
| _handle = c_api.TFE_TensorHandleResolve(EagerTensorHandle, tf.Status.Handle); | |||
| tf.Status.Check(true); | |||
| #if TRACK_TENSOR_LIFE | |||
| print($"New TensorHandle {Id} 0x{_handle.ToString("x16")}"); | |||
| print($"New EagerTensorHandle {Id} {EagerTensorHandle}"); | |||
| print($"Take EagerTensorHandle {EagerTensorHandle} {Id} Resolving 0x{_handle.ToString("x16")}"); | |||
| #endif | |||
| return this; | |||
| } | |||
| /// <summary> | |||
| @@ -80,22 +79,10 @@ namespace Tensorflow.Eager | |||
| } | |||
| } | |||
| public override IntPtr ToPointer() | |||
| => EagerTensorHandle?.DangerousGetHandle() ?? IntPtr.Zero; | |||
| protected override void DisposeManagedResources() | |||
| { | |||
| base.DisposeManagedResources(); | |||
| } | |||
| protected override void DisposeUnmanagedResources(IntPtr handle) | |||
| { | |||
| base.DisposeUnmanagedResources(handle); | |||
| EagerTensorHandle.Dispose(); | |||
| if (_opHandle != null) | |||
| _opHandle.Dispose(); | |||
| } | |||
| } | |||
| } | |||
| @@ -4,8 +4,7 @@ namespace Tensorflow.Eager | |||
| { | |||
| public partial class EagerTensor | |||
| { | |||
| [Obsolete("Implicit conversion of EagerTensor to IntPtr is not supported.", error: true)] | |||
| public static implicit operator IntPtr(EagerTensor tensor) | |||
| => throw new NotSupportedException(); | |||
| => tensor.EagerTensorHandle.DangerousGetHandle(); | |||
| } | |||
| } | |||
| @@ -30,16 +30,11 @@ namespace Tensorflow.Eager | |||
| public SafeOpHandle(IntPtr handle) | |||
| : base(handle) | |||
| { | |||
| #if TRACK_TENSOR_LIFE | |||
| print($"Get OpHandle 0x{handle.ToString("x16")}"); | |||
| #endif | |||
| } | |||
| protected override bool ReleaseHandle() | |||
| { | |||
| #if TRACK_TENSOR_LIFE | |||
| print($"Delete OpHandle 0x{handle.ToString("x16")}"); | |||
| #endif | |||
| c_api.TFE_DeleteOp(handle); | |||
| SetHandle(IntPtr.Zero); | |||
| return true; | |||
| @@ -10,24 +10,14 @@ namespace Tensorflow.Functions | |||
| /// <summary> | |||
| /// Wraps a traced FuncGraph that can be invoked like a function. | |||
| /// </summary> | |||
| public class ConcreteFunction : IDisposable | |||
| public class ConcreteFunction | |||
| { | |||
| IntPtr _handle; | |||
| FuncGraph func_graph; | |||
| public Tensor[] Inputs => func_graph.Inputs; | |||
| public Tensor[] CapturedInputs => func_graph.external_captures; | |||
| public string Name | |||
| { | |||
| get | |||
| { | |||
| if (func_graph != null) | |||
| return func_graph.FuncName; | |||
| public string Name => func_graph?.FuncName; | |||
| return _handle == IntPtr.Zero ? string.Empty : c_api.StringPiece(c_api.TF_FunctionName(_handle)); | |||
| } | |||
| } | |||
| public Tensor[] Outputs; | |||
| public Type ReturnType; | |||
| public TensorSpec[] OutputStructure; | |||
| @@ -48,39 +38,37 @@ namespace Tensorflow.Functions | |||
| { | |||
| string func_name = $"{func.Method.Name}_{Guid.NewGuid()}"; | |||
| // IntPtr func_handle; | |||
| using var graph = new FuncGraph(func_name); | |||
| graph.as_default(); | |||
| func_graph = new FuncGraph(func_name); | |||
| func_graph.as_default(); | |||
| var input = tf.placeholder(dtype); | |||
| var output = func(input); | |||
| var opers = graph._nodes_by_name.Values.Select(x => x as Operation).ToArray(); | |||
| _handle = graph.ToGraph(opers, | |||
| var opers = func_graph._nodes_by_name.Values.Select(x => x as Operation).ToArray(); | |||
| func_graph.ToGraph(opers, | |||
| new[] { input }, | |||
| new[] { output }, | |||
| null); | |||
| graph.Exit(); | |||
| func_graph.Exit(); | |||
| } | |||
| public ConcreteFunction(Func<Tensor, IDatasetV2> func, TF_DataType dtype) | |||
| { | |||
| string func_name = $"{func.Method.Name}_{Guid.NewGuid()}"; | |||
| // IntPtr func_handle; | |||
| using var graph = new FuncGraph(func_name); | |||
| graph.as_default(); | |||
| func_graph = new FuncGraph(func_name); | |||
| func_graph.as_default(); | |||
| var input = tf.placeholder(dtype); | |||
| var output = func(input); | |||
| OutputStructure = output.structure; | |||
| var opers = graph._nodes_by_name.Values.Select(x => x as Operation).ToArray(); | |||
| _handle = graph.ToGraph(opers, | |||
| var opers = func_graph._nodes_by_name.Values.Select(x => x as Operation).ToArray(); | |||
| func_graph.ToGraph(opers, | |||
| new[] { input }, | |||
| new[] { output.variant_tensor }, | |||
| null); | |||
| graph.Exit(); | |||
| func_graph.Exit(); | |||
| } | |||
| public ConcreteFunction(Func<Tensors, Tensors> func, | |||
| @@ -89,8 +77,8 @@ namespace Tensorflow.Functions | |||
| string func_name = $"{func.Method.Name}_{Guid.NewGuid()}"; | |||
| // IntPtr func_handle; | |||
| using var graph = new FuncGraph(func_name); | |||
| graph.as_default(); | |||
| func_graph = new FuncGraph(func_name); | |||
| func_graph.as_default(); | |||
| var inputs = new Tensors(); | |||
| foreach(var (i, dtype) in enumerate(dtypes)) | |||
| @@ -98,15 +86,15 @@ namespace Tensorflow.Functions | |||
| Outputs = func(inputs); | |||
| OutputStructure = Outputs.Select(x => x.ToTensorSpec()).ToArray(); | |||
| var opers = graph._nodes_by_name.Values.Select(x => x as Operation).ToArray(); | |||
| _handle = graph.ToGraph(opers, inputs, Outputs, null); | |||
| graph.Exit(); | |||
| var opers = func_graph._nodes_by_name.Values.Select(x => x as Operation).ToArray(); | |||
| func_graph.ToGraph(opers, inputs, Outputs, null); | |||
| func_graph.Exit(); | |||
| } | |||
| public void ToGraph(Tensors inputs, Tensors outputs) | |||
| { | |||
| var opers = func_graph._nodes_by_name.Values.Select(x => x as Operation).ToArray(); | |||
| _handle = func_graph.ToGraph(opers, | |||
| func_graph.ToGraph(opers, | |||
| inputs, | |||
| outputs, | |||
| null); | |||
| @@ -180,11 +168,5 @@ namespace Tensorflow.Functions | |||
| public override string ToString() | |||
| => Name; | |||
| public void Dispose() | |||
| { | |||
| c_api.TFE_ContextRemoveFunction(tf.Context.Handle, Name, tf.Status.Handle); | |||
| c_api.TF_DeleteFunction(_handle); | |||
| } | |||
| } | |||
| } | |||
| @@ -10,20 +10,18 @@ namespace Tensorflow.Graphs | |||
| { | |||
| string func_name = $"{func.Method.Name}_{Guid.NewGuid()}"; | |||
| // IntPtr func_handle; | |||
| using (var graph = new FuncGraph(func_name)) | |||
| { | |||
| graph.as_default(); | |||
| var input = tf.placeholder(tf.int32); | |||
| var output = func(input); | |||
| var graph = new FuncGraph(func_name); | |||
| graph.as_default(); | |||
| var opers = graph._nodes_by_name.Values.Select(x => x as Operation).ToArray(); | |||
| var func_handle = graph.ToGraph(opers, | |||
| new[] { input }, | |||
| new[] { output }, | |||
| null); | |||
| graph.Exit(); | |||
| } | |||
| var input = tf.placeholder(tf.int32); | |||
| var output = func(input); | |||
| var opers = graph._nodes_by_name.Values.Select(x => x as Operation).ToArray(); | |||
| graph.ToGraph(opers, | |||
| new[] { input }, | |||
| new[] { output }, | |||
| null); | |||
| graph.Exit(); | |||
| return (Tensor input) => | |||
| @@ -42,21 +40,19 @@ namespace Tensorflow.Graphs | |||
| { | |||
| string func_name = $"{func.Method.Name}_{Guid.NewGuid()}"; | |||
| // IntPtr func_handle; | |||
| using (var graph = new FuncGraph(func_name)) | |||
| { | |||
| graph.as_default(); | |||
| var input1 = tf.placeholder(tf.int32); | |||
| var input2 = tf.placeholder(tf.int32); | |||
| var output = func(input1, input2); | |||
| var graph = new FuncGraph(func_name); | |||
| graph.as_default(); | |||
| var input1 = tf.placeholder(tf.int32); | |||
| var input2 = tf.placeholder(tf.int32); | |||
| var output = func(input1, input2); | |||
| var opers = graph._nodes_by_name.Values.Select(x => x as Operation).ToArray(); | |||
| var func_handle = graph.ToGraph(opers, | |||
| new[] { input1, input2 }, | |||
| new[] { output }, | |||
| null); | |||
| graph.Exit(); | |||
| } | |||
| var opers = graph._nodes_by_name.Values.Select(x => x as Operation).ToArray(); | |||
| graph.ToGraph(opers, | |||
| new[] { input1, input2 }, | |||
| new[] { output }, | |||
| null); | |||
| graph.Exit(); | |||
| return (Tensor a, Tensor b) => | |||
| { | |||
| @@ -13,8 +13,7 @@ namespace Tensorflow.Graphs | |||
| /// </summary> | |||
| public class FuncGraph : Graph | |||
| { | |||
| // _handle == IntPtr.Zero ? string.Empty : c_api.StringPiece(c_api.TF_FunctionName(_handle)); | |||
| IntPtr func_handle; | |||
| IntPtr _func_graph_handle; | |||
| public string FuncName => _graph_key; | |||
| public Tensors Inputs { get; set; } = new Tensors(); | |||
| @@ -60,12 +59,12 @@ namespace Tensorflow.Graphs | |||
| _handle = handle; | |||
| } | |||
| public IntPtr ToGraph(Operation[] opers, | |||
| public void ToGraph(Operation[] opers, | |||
| Tensor[] inputs, Tensor[] outputs, | |||
| string[] output_names) | |||
| { | |||
| using var status = new Status(); | |||
| func_handle = c_api.TF_GraphToFunction(_handle, | |||
| var status = new Status(); | |||
| _func_graph_handle = c_api.TF_GraphToFunction(_handle, | |||
| _graph_key, | |||
| false, | |||
| opers.Length, | |||
| @@ -82,19 +81,17 @@ namespace Tensorflow.Graphs | |||
| SetAttrs(); | |||
| c_api.TF_GraphCopyFunction(outer_graph, func_handle, IntPtr.Zero, status.Handle); | |||
| status.Check(true); | |||
| // c_api.TF_GraphCopyFunction(outer_graph, _func_graph_handle, IntPtr.Zero, status.Handle); | |||
| // status.Check(true); | |||
| c_api.TFE_ContextAddFunction(tf.Context.Handle, func_handle, status.Handle); | |||
| c_api.TFE_ContextAddFunction(tf.Context.Handle, _func_graph_handle, status.Handle); | |||
| status.Check(true); | |||
| _graph_key = c_api.StringPiece(c_api.TF_FunctionName(func_handle)); | |||
| _graph_key = c_api.StringPiece(c_api.TF_FunctionName(_func_graph_handle)); | |||
| Inputs = inputs; | |||
| // mark_as_return | |||
| Outputs = outputs;// .Select(x => array_ops.identity(x)).ToArray(); | |||
| return func_handle; | |||
| } | |||
| public override Operation create_op(string op_type, Tensor[] inputs, TF_DataType[] dtypes, TF_DataType[] input_types = null, string name = null, Dictionary<string, AttrValue> attrs = null, OpDef op_def = null, bool compute_device = true) | |||
| @@ -233,7 +230,7 @@ namespace Tensorflow.Graphs | |||
| { | |||
| S = ByteString.CopyFromUtf8(attr_value) | |||
| }.ToByteArray(); | |||
| c_api.TF_FunctionSetAttrValueProto(func_handle, _name, serialized, serialized.Length, tf.Status.Handle); | |||
| c_api.TF_FunctionSetAttrValueProto(_func_graph_handle, _name, serialized, serialized.Length, tf.Status.Handle); | |||
| tf.Status.Check(true); | |||
| } | |||
| } | |||
| @@ -250,5 +247,12 @@ namespace Tensorflow.Graphs | |||
| tf.Context.restore_mode(); | |||
| ops.pop_graph(); | |||
| } | |||
| protected override void DisposeUnmanagedResources(IntPtr handle) | |||
| { | |||
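| // unregister the function from the eager context and free its native handle (previously done in ConcreteFunction.Dispose) | |||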
| c_api.TFE_ContextRemoveFunction(tf.Context.Handle, _graph_key, tf.Status.Handle); | |||
| c_api.TF_DeleteFunction(_func_graph_handle); | |||
| base.DisposeUnmanagedResources(handle); | |||
| } | |||
| } | |||
| } | |||
| @@ -302,7 +302,7 @@ namespace Tensorflow | |||
| public void device(string device_name) | |||
| { | |||
| throw new NotImplementedException(""); | |||
| } | |||
| private void _create_op_helper(Operation op, bool compute_device = true) | |||
| @@ -8,7 +8,7 @@ namespace Tensorflow.Keras.Engine | |||
| { | |||
| Tensors input_tensors { get; } | |||
| Tensors Outputs { get; } | |||
| ILayer Layer { get; set; } | |||
| ILayer Layer { get; } | |||
| List<Tensor> KerasInputs { get; set; } | |||
| INode[] ParentNodes { get; } | |||
| IEnumerable<(ILayer, int, int, Tensor)> iterate_inbound(); | |||
| @@ -11,14 +11,12 @@ | |||
| public int NodeIndex => node_index; | |||
| int tensor_index; | |||
| public int TensorIndex => tensor_index; | |||
| Tensor tensor; | |||
| public KerasHistory(ILayer layer, int node_index, int tensor_index, Tensor tensor) | |||
| public KerasHistory(ILayer layer, int node_index, int tensor_index) | |||
| { | |||
| this.layer = layer; | |||
| this.node_index = node_index; | |||
| this.tensor_index = tensor_index; | |||
| this.tensor = tensor; | |||
| } | |||
| public void Deconstruct(out ILayer layer, out int node_index, out int tensor_index) | |||
| @@ -29,6 +27,6 @@ | |||
| } | |||
| public override string ToString() | |||
| => $"{layer.GetType().Name} {layer.Name} {tensor.name}"; | |||
| => $"{layer.GetType().Name} {layer.Name}"; | |||
| } | |||
| } | |||
| @@ -16,6 +16,8 @@ namespace Tensorflow.Keras | |||
| List<IVariableV1> trainable_weights { get; } | |||
| List<IVariableV1> non_trainable_weights { get; } | |||
| TensorShape output_shape { get; } | |||
| TensorShape BatchInputShape { get; } | |||
| TF_DataType DType { get; } | |||
| int count_params(); | |||
| LayerArgs get_config(); | |||
| } | |||
| @@ -72,6 +72,10 @@ namespace Tensorflow | |||
| public TensorShape output_shape => throw new NotImplementedException(); | |||
| public TensorShape BatchInputShape => throw new NotImplementedException(); | |||
| public TF_DataType DType => throw new NotImplementedException(); | |||
| public RnnCell(bool trainable = true, | |||
| string name = null, | |||
| TF_DataType dtype = TF_DataType.DtInvalid, | |||
| @@ -65,7 +65,7 @@ namespace Tensorflow | |||
| return gen_control_flow_ops.next_iteration(data, name: name); | |||
| } | |||
| public static Operation Assert(Tensor condition, object[] data, long? summarize = null, string name = null) | |||
| public static Operation Assert(Tensor condition, object[] data, long summarize = 3, string name = null) | |||
| { | |||
| if (tf.executing_eagerly()) | |||
| { | |||
| @@ -82,7 +82,7 @@ namespace Tensorflow | |||
| condition = ops.convert_to_tensor(condition, name: "Condition"); | |||
| Func<Operation[]> true_assert = () => | |||
| { | |||
| var assert = gen_logging_ops._assert(condition, data, summarize, name: "Assert"); | |||
| var assert = gen_logging_ops.assert(condition, data, summarize, name: "Assert"); | |||
| return new Operation[] { assert }; | |||
| }; | |||
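Since `summarize` is now a plain `long` with a default of 3, callers no longer juggle a nullable; a small hedged sketch (tensor values arbitrary):

```csharp
// Minimal sketch: in eager mode the condition is checked immediately; in graph
// mode the call builds the Assert op through the true_assert branch above.
var x = tf.constant(2.0f);
var y = tf.constant(1.0f);
var assert_op = control_flow_ops.Assert(tf.greater(x, y), new object[] { x, y });
```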
| @@ -21,10 +21,17 @@ namespace Tensorflow | |||
| { | |||
| public class gen_logging_ops | |||
| { | |||
| public static Operation _assert(Tensor condition, object[] data, long? summarize = 3, string name = null) | |||
| public static Operation assert(Tensor condition, object[] data, long summarize = 3, string name = null) | |||
| { | |||
| if (!summarize.HasValue) | |||
| summarize = 3; | |||
| if (tf.Context.executing_eagerly()) | |||
| { | |||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||
| "Assert", name, | |||
| null, | |||
| new object[] { condition, data, summarize }); | |||
| return results[0]; | |||
| } | |||
| var _op = tf.OpDefLib._apply_op_helper("Assert", name, args: new { condition, data, summarize }); | |||
| @@ -68,7 +68,7 @@ namespace Tensorflow | |||
| null, | |||
| resource, value); | |||
| return results.Length == 0 ? null : results[0]; | |||
| return null; | |||
| } | |||
| var _op = tf.OpDefLib._apply_op_helper("AssignVariableOp", name, new { resource, value }); | |||
| @@ -113,7 +113,8 @@ namespace Tensorflow | |||
| "container", container, | |||
| "shared_name", shared_name, | |||
| "dtype", dtype, | |||
| "shape", shape.dims); | |||
| "shape", shape.dims, | |||
| "allowed_devices", new string[0]); | |||
| return results[0]; | |||
| } | |||
| @@ -129,6 +130,28 @@ namespace Tensorflow | |||
| return _op.output; | |||
| } | |||
| public static Tensor destroy_resource_op(Tensor resource, bool ignore_lookup_error = true, string name = null) | |||
| { | |||
| if (tf.Context.executing_eagerly()) | |||
| { | |||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||
| "DestroyResourceOp", name, | |||
| null, | |||
| resource, | |||
| "ignore_lookup_error", ignore_lookup_error); | |||
| return results.Length == 0 ? null : results[0]; | |||
| } | |||
| var _op = tf.OpDefLib._apply_op_helper("DestroyResourceOp", name, new | |||
| { | |||
| resource, | |||
| ignore_lookup_error | |||
| }); | |||
| return _op.output; | |||
| } | |||
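The new destroy_resource_op follows the same eager-first shape used by the other wrappers in this change; as a hedged illustration of that recurring pattern (the op name `Foo` and its argument are purely hypothetical):

```csharp
// Hypothetical wrapper showing the recurring pattern:
// 1) eager mode: dispatch through TFE_FastPathExecute on the current context/device;
// 2) graph mode: fall back to OpDefLib._apply_op_helper and return the op's output.
public static Tensor foo(Tensor x, string name = null)
{
    if (tf.Context.executing_eagerly())
    {
        var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName,
            "Foo", name,
            null,
            x);
        return results[0];
    }

    var _op = tf.OpDefLib._apply_op_helper("Foo", name, args: new { x });
    return _op.output;
}
```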
| /// <summary> | |||
| /// Reads the value of a variable. | |||
| /// </summary> | |||
| @@ -26,7 +26,7 @@ namespace Tensorflow | |||
| /// </summary> | |||
| public static class resource_variable_ops | |||
| { | |||
| public static ITensorOrOperation shape_safe_assign_variable_handle(Tensor handle, int[] shape, Tensor value, string name = null) | |||
| public static Operation shape_safe_assign_variable_handle(Tensor handle, int[] shape, Tensor value, string name = null) | |||
| { | |||
| var value_tensor = ops.convert_to_tensor(value); | |||
| return gen_resource_variable_ops.assign_variable_op(handle, | |||
| @@ -96,8 +96,10 @@ namespace Tensorflow | |||
| // We create an assert Op instead of checking right away in order to be | |||
| // compatible with ASYNC execution mode. Further, since not all devices | |||
| // support string tensors, we encode the assertion string in the Op name | |||
| /*gen_logging_ops._assert( | |||
| math_ops.logical_not(exists), [exists], name = "EagerVariableNameReuse");*/ | |||
| /*gen_logging_ops.assert(gen_math_ops.logical_not(exists), | |||
| new[] { exists }, | |||
| name: "EagerVariableNameReuse");*/ | |||
| var handle_data = new HandleData(); | |||
| handle_data.IsSet = true; | |||
| handle_data.ShapeAndType.Add(new HandleShapeAndType | |||
| @@ -456,11 +456,6 @@ namespace Tensorflow | |||
| private void _extend_graph() | |||
| { } | |||
| public void close() | |||
| { | |||
| Dispose(); | |||
| } | |||
| protected override void DisposeUnmanagedResources(IntPtr handle) | |||
| { | |||
| lock (Locks.ProcessWide) | |||
| @@ -475,11 +475,11 @@ namespace Tensorflow | |||
| size += TF_StringEncodedSize((ulong)b.Length); | |||
| ulong src_size = size + (ulong)buffer.Length * sizeof(ulong); | |||
| IntPtr handle = TF_AllocateTensor(TF_DataType.TF_STRING, shape, shape.Length, src_size); | |||
| _handle = TF_AllocateTensor(TF_DataType.TF_STRING, shape, shape.Length, src_size); | |||
| AllocationType = AllocationType.Tensorflow; | |||
| // Clear offset table | |||
| IntPtr input = TF_TensorData(handle); | |||
| IntPtr input = TensorDataPointer; | |||
| IntPtr data_start = input + buffer.Length * sizeof(ulong); | |||
| IntPtr limit = input + (int)src_size; | |||
| ulong offset = 0; | |||
| @@ -496,7 +496,9 @@ namespace Tensorflow | |||
| } | |||
| } | |||
| _handle = handle; | |||
| #if TRACK_TENSOR_LIFE | |||
| print($"New Tensor 0x{_handle.ToString("x16")} {AllocationType} String Data: 0x{TensorDataPointer.ToString("x16")}"); | |||
| #endif | |||
| } | |||
| public unsafe Tensor(NDArray nd, TF_DataType? tensorDType = null) | |||
| @@ -563,12 +565,13 @@ namespace Tensorflow | |||
| { | |||
| AllocationType = AllocationType.FromPointer; | |||
| AllocationHandle = arraySlice; | |||
| #if TRACK_TENSOR_LIFE | |||
| print($"New Tensor {Id} {AllocationType} 0x{TensorDataPointer.ToString("x16")}"); | |||
| #endif | |||
| } | |||
| else | |||
| AllocationType = AllocationType.Tensorflow; | |||
| #if TRACK_TENSOR_LIFE | |||
| print($"New Tensor 0x{_handle.ToString("x16")} {AllocationType} Data: 0x{TensorDataPointer.ToString("x16")}"); | |||
| #endif | |||
| } | |||
| public Tensor(Operation op, int value_index, TF_DataType dtype) | |||
| @@ -181,7 +181,6 @@ namespace Tensorflow | |||
| storage.Allocate(new Shape(shape)); | |||
| var bytesize = (long)this.bytesize; | |||
| System.Buffer.MemoryCopy(buffer.ToPointer(), storage.Address, bytesize, bytesize); | |||
| return new NDArray(storage); | |||
| @@ -24,6 +24,7 @@ using System.Runtime.InteropServices; | |||
| using Tensorflow.Eager; | |||
| using Tensorflow.Framework; | |||
| using Tensorflow.Keras.Engine; | |||
| using Tensorflow.Variables; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| @@ -260,12 +261,11 @@ namespace Tensorflow | |||
| [SuppressMessage("ReSharper", "ConvertIfStatementToSwitchStatement")] | |||
| protected override void DisposeUnmanagedResources(IntPtr handle) | |||
| { | |||
| if (AllocationHandle != null) | |||
| { | |||
| #if TRACK_TENSOR_LIFE | |||
| print($"Delete AllocationHandle.{AllocationType} 0x{TensorDataPointer.ToString("x16")}"); | |||
| print($"Delete Tensor 0x{handle.ToString("x16")} {AllocationType} Data: 0x{TensorDataPointer.ToString("x16")}"); | |||
| #endif | |||
| if (AllocationHandle != null) | |||
| { | |||
| if (AllocationType == AllocationType.GCHandle) | |||
| { | |||
| ((GCHandle)AllocationHandle).Free(); | |||
| @@ -287,17 +287,9 @@ namespace Tensorflow | |||
| throw new InvalidOperationException($"Tensor.AllocationHandle is not null ({AllocationHandle}) but AllocationType is not matched to a C# allocation type ({AllocationType})."); | |||
| } | |||
| #if TRACK_TENSOR_LIFE | |||
| print($"Delete TensorHandle 0x{handle.ToString("x16")}"); | |||
| #endif | |||
| c_api.TF_DeleteTensor(handle); | |||
| } | |||
| public virtual IntPtr ToPointer() | |||
| => _handle; | |||
| public bool IsDisposed => _disposed; | |||
| // public int tensor_int_val { get; set; } | |||
| } | |||
| } | |||
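Net effect of the reordered dispose path: the managed allocation (GCHandle, pooled buffer, or caller-supplied pointer owner) is released first, the native tensor is always deleted afterwards, and both events can be traced under TRACK_TENSOR_LIFE. A minimal sketch of what a caller sees (values arbitrary, assumes the usual NumSharp and Tensorflow.Binding usings):

```csharp
// Minimal sketch: dispose releases the C#-side allocation first, then calls
// c_api.TF_DeleteTensor on the native handle, matching DisposeUnmanagedResources above.
using (var t = new Tensor(np.array(new float[] { 1f, 2f, 3f })))
{
    var ptr = t.ToPointer();   // raw TF_Tensor* while the tensor is alive
    // ... hand ptr to native code ...
}
// after the block both the managed buffer and the native TF_Tensor are released
```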
| @@ -43,6 +43,11 @@ namespace Tensorflow | |||
| items.AddRange(tensors); | |||
| } | |||
| public Tensors(IEnumerable<Tensor> tensors) | |||
| { | |||
| items.AddRange(tensors); | |||
| } | |||
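The added IEnumerable&lt;Tensor&gt; overload lets lazily produced sequences be wrapped directly; for example (sketch, values arbitrary):

```csharp
// Minimal sketch: build a Tensors bundle straight from LINQ output without
// materializing an intermediate array (uses the new IEnumerable<Tensor> ctor).
var parts = Enumerable.Range(0, 3).Select(i => tf.constant((float)i));
var bundle = new Tensors(parts);
```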
| public Tensors(NDArray nd) | |||
| { | |||
| items.Add(ops.convert_to_tensor(nd)); | |||
| @@ -35,7 +35,7 @@ namespace Tensorflow | |||
| this.name = name; | |||
| } | |||
| public override ITensorOrOperation restore(Tensor[] restored_tensors, TensorShape[] restored_shapes = null) | |||
| public override Operation restore(Tensor[] restored_tensors, TensorShape[] restored_shapes = null) | |||
| { | |||
| var restored_tensor = restored_tensors[0]; | |||
| restored_tensor = array_ops.identity(restored_tensor); | |||
| @@ -40,7 +40,7 @@ namespace Tensorflow | |||
| this.name = name; | |||
| } | |||
| public virtual ITensorOrOperation restore(Tensor[] restored_tensors, TensorShape[] restored_shapes = null) | |||
| public virtual Operation restore(Tensor[] restored_tensors, TensorShape[] restored_shapes = null) | |||
| { | |||
| var restored_tensor = restored_tensors[0]; | |||
| return gen_state_ops.assign(op, | |||
| @@ -1,5 +1,7 @@ | |||
| using NumSharp; | |||
| using System; | |||
| using Tensorflow.Eager; | |||
| using Tensorflow.Variables; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow | |||
| @@ -22,7 +24,6 @@ namespace Tensorflow | |||
| public bool trainable => _trainable; | |||
| protected Tensor _initial_value; | |||
| public Tensor initial_value => _initial_value; | |||
| public Operation initializer => initializer_op; | |||
| @@ -44,16 +45,12 @@ namespace Tensorflow | |||
| public Operation Op => handle.op; | |||
| public Graph Graph => handle.graph; | |||
| public string Device => handle.Device; | |||
| EagerResourceDeleter eager_resource_deleter; | |||
| public BaseResourceVariable() | |||
| { | |||
| } | |||
| public BaseResourceVariable(IntPtr handle, IntPtr tensor) | |||
| { | |||
| _handle = handle; | |||
| } | |||
| public void __init__(bool trainable = true, | |||
| Tensor handle = null, | |||
| string name = null, | |||
| @@ -66,7 +63,24 @@ namespace Tensorflow | |||
| this.handle = handle; | |||
| _name = name; | |||
| // handle_deleter | |||
| // After the handle has been created, set up a way to clean it up when | |||
| // executing eagerly. We'll hold the only reference to the deleter, so that | |||
| // when this object is garbage collected the deleter will be too. This | |||
| // means ResourceVariables can be part of reference cycles without those | |||
| // cycles being uncollectable. | |||
| if (handle.IsEagerTensor) | |||
| { | |||
| _handle = handle.EagerTensorHandle.DangerousGetHandle(); | |||
| eager_resource_deleter = new EagerResourceDeleter(handle, handle.Device); | |||
| } | |||
| else | |||
| { | |||
| _handle = handle; | |||
| } | |||
| #if TRACK_TENSOR_LIFE | |||
| print($"Created Resource 0x{_handle.ToString("x16")} {_name}"); | |||
| #endif | |||
| } | |||
| public Tensor assign<T>(T value, bool use_locking = false, string name = null, bool read_value = true) | |||
| @@ -85,7 +99,7 @@ namespace Tensorflow | |||
| if (read_value) | |||
| return gen_resource_variable_ops.read_variable_op(handle, dtype); | |||
| return assign_op; | |||
| } | |||
| @@ -214,6 +228,9 @@ namespace Tensorflow | |||
| protected override void DisposeUnmanagedResources(IntPtr handle) | |||
| { | |||
| #if TRACK_TENSOR_LIFE | |||
| print($"Deleted Resource 0x{handle.ToString("x16")} {_name}"); | |||
| #endif | |||
| } | |||
| public Tensor AsTensor(TF_DataType dtype = TF_DataType.DtInvalid, string name = null, bool as_ref = false) | |||
| @@ -0,0 +1,34 @@ | |||
| using System; | |||
| using System.Collections.Generic; | |||
| using System.Text; | |||
| using static Tensorflow.Binding; | |||
| namespace Tensorflow.Variables | |||
| { | |||
| public class EagerResourceDeleter : DisposableObject | |||
| { | |||
| Tensor _tensor; | |||
| string _handle_device; | |||
| public EagerResourceDeleter(Tensor handle, string handle_device) | |||
| { | |||
| _tensor = handle; | |||
| _handle = handle.EagerTensorHandle.DangerousGetHandle(); | |||
| _handle_device = handle_device; | |||
| bool success = false; | |||
| handle.EagerTensorHandle.DangerousAddRef(ref success); | |||
| } | |||
| protected override void DisposeUnmanagedResources(IntPtr handle) | |||
| { | |||
| // gen_resource_variable_ops.destroy_resource_op(_tensor, ignore_lookup_error: true); | |||
| tf.device(_handle_device); | |||
| tf.Runner.TFE_Execute(tf.Context, _handle_device, "DestroyResourceOp", | |||
| new[] { _tensor }, | |||
| new object[] { "ignore_lookup_error", true }, 0); | |||
| _tensor.EagerTensorHandle.DangerousRelease(); | |||
| } | |||
| } | |||
| } | |||
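The deleter pins the eager handle with DangerousAddRef for its own lifetime and, when it is finalized or disposed, runs DestroyResourceOp on the variable's device before releasing the ref. A hedged sketch of the intended ownership chain (eager mode assumed):

```csharp
// Minimal sketch: the variable holds the only reference to its EagerResourceDeleter,
// so once the variable is unreachable the deleter is collected too and its
// DisposeUnmanagedResources issues DestroyResourceOp for the backing resource.
void CreateAndDropVariable()
{
    var v = tf.Variable(42.0f, name: "scratch");
    // ... use v ...
}

CreateAndDropVariable();
GC.Collect();                    // same pattern the training loops in this change use
GC.WaitForPendingFinalizers();   // finalizers run DestroyResourceOp via the deleter
```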
| @@ -344,7 +344,7 @@ namespace Tensorflow | |||
| return assign; | |||
| return assign.op; | |||
| } | |||
| public override string ToString() | |||
| { | |||
| return $"tf.RefVariable '{Name}' shape={shape} dtype={dtype}"; | |||
| @@ -78,8 +78,7 @@ namespace Tensorflow | |||
| tf_with(ops.init_scope(), init_scope => | |||
| { | |||
| _in_graph_mode = !tf.Context.executing_eagerly(); | |||
| var values = init_from_fn ? new object[0] : new object[] { initial_value }; | |||
| tf_with(ops.name_scope(name, "Variable", values, skip_on_eager: false), scope => | |||
| tf_with(ops.name_scope(name, "Variable", initial_value, skip_on_eager: false), scope => | |||
| { | |||
| name = scope; | |||
| var handle_name = ops.name_from_scope_name(name); | |||
| @@ -103,19 +102,17 @@ namespace Tensorflow | |||
| tf_with(ops.name_scope("Initializer"), delegate | |||
| { | |||
| if (initial_value.GetType().GetInterface("IInitializer") != null) | |||
| initial_value = ops.convert_to_tensor((initial_value as IInitializer).Apply(new InitializerArgs(shape, dtype: dtype))); | |||
| _initial_value = ops.convert_to_tensor((initial_value as IInitializer).Apply(new InitializerArgs(shape, dtype: dtype))); | |||
| else | |||
| { | |||
| var value = init_from_fn ? (initial_value as Func<Tensor>)() : initial_value; | |||
| initial_value = ops.convert_to_tensor(value, | |||
| _initial_value = ops.convert_to_tensor(value, | |||
| name: "initial_value", | |||
| dtype: dtype); | |||
| } | |||
| }); | |||
| _shape = shape ?? (initial_value as Tensor).TensorShape; | |||
| _initial_value = initial_value as Tensor; | |||
| _shape = shape ?? _initial_value.TensorShape; | |||
| if (_in_graph_mode) | |||
| { | |||
| @@ -141,7 +138,7 @@ namespace Tensorflow | |||
| initializer_op = null; | |||
| _graph_element = null; | |||
| _dtype = _initial_value.dtype.as_base_dtype(); | |||
| initial_value = _in_graph_mode ? initial_value : null; | |||
| // initial_value = _in_graph_mode ? initial_value : null; | |||
| } | |||
| base.__init__(trainable: trainable, | |||
| @@ -60,6 +60,18 @@ namespace Tensorflow | |||
| bool use_locking = true, | |||
| string name = null) | |||
| { | |||
| if (tf.executing_eagerly()) | |||
| { | |||
| var results = tf.Runner.TFE_FastPathExecute(tf.Context, tf.Context.DeviceName, | |||
| "Assign", name, | |||
| null, | |||
| @ref, value, | |||
| "validate_shape", validate_shape, | |||
| "use_locking", use_locking); | |||
| return results[0]; | |||
| } | |||
| var _op = tf.OpDefLib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking }); | |||
| var _result = _op.outputs; | |||
| @@ -101,7 +101,7 @@ namespace Tensorflow | |||
| foreach (var op_input in op_input_list) | |||
| { | |||
| // Determine if this is a valid graph_element. | |||
| var graph_element = op_input; | |||
| // var graph_element = op_input; | |||
| } | |||
| return get_default_graph(); | |||
| @@ -79,7 +79,7 @@ namespace Tensorflow | |||
| return (null, null); | |||
| if (name == null) | |||
| name = ""; | |||
| name = _default_name; | |||
| var scope_name = name; | |||
| var old_name = ctx.ScopeName; | |||
| @@ -118,10 +118,20 @@ namespace Tensorflow.Keras | |||
| { | |||
| tf.Context.reset_context(); | |||
| reset_uids(); | |||
| ops.set_default_session(tf.Session(ops.get_default_graph())); | |||
| // var phase = tf.placeholder_with_default(false, new int[] { }, name: "keras_learning_phase"); | |||
| _GRAPH_LEARNING_PHASES = new Dictionary<Graph, GraphLearningPhase>(); | |||
| _GRAPH_LEARNING_PHASES[tf.get_default_graph()] = 0; | |||
| if (_GRAPH_LEARNING_PHASES != null) | |||
| _GRAPH_LEARNING_PHASES.Clear(); | |||
| if (_GRAPH_LEARNING_PHASES != null) | |||
| _GRAPH_LEARNING_PHASES.Clear(); | |||
| PER_GRAPH_LAYER_NAME_UIDS.Clear(); | |||
| _CURRENT_SCRATCH_GRAPH = null; | |||
| _GRAPH = null; | |||
| ops.set_default_session(tf.Session(ops.get_default_graph())); | |||
| tf.enable_eager_execution(); | |||
| GC.Collect(); | |||
| GC.WaitForPendingFinalizers(); | |||
| } | |||
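clear_session now drops the per-graph Keras caches, resets the default session, re-enables eager execution and forces a GC pass so the resource deleters above actually run. Typical hedged usage, matching the leak benchmark further down:

```csharp
// Minimal sketch: when models are built and fitted in a loop, clearing the session
// between iterations releases per-graph Keras state and lets finalizers reclaim
// tensors and variables from the previous model.
for (var i = 0; i < 10; i++)
{
    var model = keras.Sequential();
    // ... model.add(...), model.compile(...), model.fit(...) ...
    keras.backend.clear_session();
}
```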
| public void manual_variable_initialization(bool value) | |||
| { | |||
| @@ -147,7 +157,7 @@ namespace Tensorflow.Keras | |||
| if (ops.executing_eagerly_outside_functions()) | |||
| { | |||
| foreach (var (x, value) in tuples) | |||
| x.assign(value); | |||
| x.assign(value, read_value: false); | |||
| } | |||
| else | |||
| { | |||
| @@ -93,13 +93,11 @@ namespace Tensorflow.Keras.Engine.DataAdapters | |||
| public IEnumerable<(int, OwnedIterator)> enumerate_epochs() | |||
| { | |||
| var data_iterator = new OwnedIterator(_dataset); | |||
| foreach (var epoch in range(_initial_epoch, _epochs)) | |||
| { | |||
| if (_insufficient_data) | |||
| break; | |||
| if (_adapter.ShouldRecreateIterator()) | |||
| data_iterator = new OwnedIterator(_dataset); | |||
| using var data_iterator = new OwnedIterator(_dataset); | |||
| yield return (epoch, data_iterator); | |||
| } | |||
| } | |||
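With `using var`, each epoch's OwnedIterator is disposed as soon as the enumerator moves past that epoch instead of lingering until the end of training; the consuming side is unchanged (sketch, data_handler construction assumed):

```csharp
// Minimal sketch of the consuming side: one fresh, automatically disposed
// OwnedIterator per epoch.
foreach (var (epoch, iterator) in data_handler.enumerate_epochs())
{
    // run the epoch's training steps against `iterator`;
    // when the loop advances, this epoch's OwnedIterator is disposed.
}
```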
| @@ -72,8 +72,8 @@ namespace Tensorflow.Keras.Engine.DataAdapters | |||
| IDatasetV2 slice_inputs(IDatasetV2 indices_dataset, Tensors elements) | |||
| { | |||
| var dataset2 = tf.data.Dataset.from_tensor(elements).repeat(); | |||
| var dataset = tf.data.Dataset.zip(indices_dataset, dataset2); | |||
| var dataset = tf.data.Dataset.from_tensor(elements).repeat(); | |||
| dataset = tf.data.Dataset.zip(indices_dataset, dataset); | |||
| dataset = dataset.map(inputs => | |||
| { | |||
| @@ -84,9 +84,7 @@ namespace Tensorflow.Keras.Engine.DataAdapters | |||
| return new Tensors(results); | |||
| }); | |||
| dataset = dataset.with_options(new DatasetOptions { }); | |||
| return dataset; | |||
| return dataset.with_options(new DatasetOptions { }); | |||
| } | |||
| public override int GetSize() | |||
| @@ -64,7 +64,7 @@ namespace Tensorflow.Keras.Engine | |||
| { | |||
| var (layer, node_index, tensor_index) = x.KerasHistory; | |||
| _output_layers.append(layer); | |||
| _output_coordinates.append(new KerasHistory(layer, node_index, tensor_index, x)); | |||
| _output_coordinates.append(new KerasHistory(layer, node_index, tensor_index)); | |||
| } | |||
| // Build self._input_layers: | |||
| @@ -72,7 +72,7 @@ namespace Tensorflow.Keras.Engine | |||
| { | |||
| var (layer, node_index, tensor_index) = x.KerasHistory; | |||
| _input_layers.append(layer); | |||
| _input_coordinates.append(new KerasHistory(layer, node_index, tensor_index, x)); | |||
| _input_coordinates.append(new KerasHistory(layer, node_index, tensor_index)); | |||
| } | |||
| // Keep track of the network's nodes and layers. | |||
| @@ -40,7 +40,8 @@ namespace Tensorflow.Keras.Engine | |||
| outputs = Call(inputs, state: state, is_training: is_training); | |||
| outputs = _set_connectivity_metadata_(inputs, outputs); | |||
| // memory leak | |||
| // _set_connectivity_metadata_(inputs, outputs); | |||
| _handle_activity_regularization(inputs, outputs); | |||
| _set_mask_metadata(inputs, outputs, null); | |||
| }); | |||
| @@ -40,8 +40,8 @@ namespace Tensorflow.Keras.Engine | |||
| throw new NotImplementedException(""); | |||
| outputs = Call(inputs); | |||
| outputs = _set_connectivity_metadata_(inputs, outputs); | |||
| _set_connectivity_metadata_(inputs, outputs); | |||
| _handle_activity_regularization(inputs, outputs); | |||
| _set_mask_metadata(inputs, outputs, null); | |||
| }); | |||
| @@ -120,15 +120,14 @@ namespace Tensorflow.Keras.Engine | |||
| public void SetConnectivityMetadata(Tensors inputs, Tensors outputs) | |||
| => _set_connectivity_metadata_(inputs, outputs); | |||
| private Tensors _set_connectivity_metadata_(Tensors inputs, Tensors outputs) | |||
| private void _set_connectivity_metadata_(Tensors inputs, Tensors outputs) | |||
| { | |||
| new Node(this, new NodeArgs | |||
| var node = new Node(new NodeArgs | |||
| { | |||
| InputTensors = inputs, | |||
| Outputs = outputs | |||
| }); | |||
| return outputs; | |||
| node.Connect(this); | |||
| } | |||
| private void _handle_activity_regularization(Tensors inputs, Tensors outputs) | |||
| @@ -99,6 +99,9 @@ namespace Tensorflow.Keras.Engine | |||
| var result_pairs = string.Join(", ", results.Select(x => $"{x.Item1}: {(float)x.Item2:F6}")); | |||
| Console.WriteLine($"Epoch: {epoch + 1:D3}/{epochs:D3}, Step: {step + 1:D4}/{data_handler.Inferredsteps:D4}, {result_pairs}"); | |||
| } | |||
| GC.Collect(); | |||
| GC.WaitForPendingFinalizers(); | |||
| } | |||
| } | |||
| } | |||
| @@ -59,6 +59,8 @@ namespace Tensorflow.Keras.Engine | |||
| var end_step = step + data_handler.StepIncrement; | |||
| // callbacks.on_predict_batch_end(end_step, {'outputs': batch_outputs}) | |||
| } | |||
| GC.Collect(); | |||
| GC.WaitForPendingFinalizers(); | |||
| } | |||
| // callbacks.on_predict_end() | |||
| return outputs; | |||
| @@ -40,7 +40,8 @@ namespace Tensorflow.Keras.Engine | |||
| public TensorShape[] input_shapes; | |||
| public TensorShape[] output_shapes; | |||
| public List<Tensor> KerasInputs { get; set; } = new List<Tensor>(); | |||
| public ILayer Layer { get; set; } | |||
| ILayer _layer; | |||
| public ILayer Layer => _layer; | |||
| public bool is_input => args.InputTensors == null; | |||
| public long[] FlatInputIds { get; set; } | |||
| public long[] FlatOutputIds { get; set; } | |||
| @@ -61,19 +62,24 @@ namespace Tensorflow.Keras.Engine | |||
| } | |||
| } | |||
| public Node(Layer layer, NodeArgs args) | |||
| public Node(NodeArgs args) | |||
| { | |||
| this.args = args; | |||
| this.Layer = layer; | |||
| } | |||
| public void Connect(Layer layer) | |||
| { | |||
| _layer = layer; | |||
| if (args.InputTensors != null) | |||
| KerasInputs.AddRange(args.InputTensors); | |||
| foreach (var (i, ele) in enumerate(KerasInputs)) | |||
| _keras_inputs_ids_and_indices[i] = ele.Id; | |||
| // Wire up Node to Layers. | |||
| layer.InboundNodes.Add(this); | |||
| foreach (var kt in KerasInputs) | |||
| { | |||
| if (kt.KerasHistory == null) | |||
| @@ -86,7 +92,7 @@ namespace Tensorflow.Keras.Engine | |||
| // Set metadata on outputs. | |||
| var node_index = layer.InboundNodes.Count - 1; | |||
| foreach (var (i, tensor) in enumerate(Outputs)) | |||
| tensor.KerasHistory = new KerasHistory(layer, node_index, i, tensor); | |||
| tensor.KerasHistory = new KerasHistory(layer, node_index, i); | |||
| // Cached for performance. | |||
| FlatInputIds = KerasInputs.Select(x => x.Id).ToArray(); | |||
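Construction and wiring are now two steps: a Node is built from NodeArgs alone, then Connect(layer) attaches it. A minimal sketch, assuming `layer`, `inputs` and `outputs` are already in scope:

```csharp
// Minimal sketch of the new two-step wiring: the layer no longer has to exist
// when the Node is constructed.
var node = new Node(new NodeArgs
{
    InputTensors = inputs,
    Outputs = outputs
});
node.Connect(layer);   // adds the node to layer.InboundNodes and stamps
                       // KerasHistory(layer, node_index, tensor_index) on each output
```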
| @@ -64,7 +64,7 @@ namespace Tensorflow.Keras.Engine | |||
| public void add(Tensor tensor) | |||
| { | |||
| var layer = tensor.KerasHistory.Layer as Layer; | |||
| var layer = tensor.KerasHistory.Layer; | |||
| add(layer); | |||
| } | |||
| @@ -72,7 +72,7 @@ namespace Tensorflow.Keras.Engine | |||
| /// Adds a layer instance on top of the layer stack. | |||
| /// </summary> | |||
| /// <param name="layer"></param> | |||
| public void add(Layer layer) | |||
| public void add(ILayer layer) | |||
| { | |||
| built = false; | |||
| var set_inputs = false; | |||
| @@ -90,10 +90,11 @@ namespace Tensorflow.Keras.Layers | |||
| // and set output_tensors' _keras_history. | |||
| // input_tensor._keras_history = base_layer.KerasHistory(self, 0, 0) | |||
| // input_tensor._keras_mask = None | |||
| new Node(this, new NodeArgs | |||
| var node = new Node(new NodeArgs | |||
| { | |||
| Outputs = args.InputTensor | |||
| }); | |||
| node.Connect(this); | |||
| typeSpec = new TensorSpec(args.InputTensor.TensorShape, | |||
| dtype: args.InputTensor.dtype, | |||
| @@ -42,15 +42,13 @@ namespace Tensorflow.Keras.Utils | |||
| Func<Tensor> init_val = () => args.Initializer.Apply(new InitializerArgs(args.Shape, dtype: args.DType)); | |||
| var variable_dtype = args.DType.as_base_dtype(); | |||
| var v = tf.Variable(init_val, | |||
| return tf.Variable(init_val, | |||
| dtype: variable_dtype, | |||
| shape: args.Shape, | |||
| name: args.Name, | |||
| trainable: args.Trainable, | |||
| validate_shape: args.ValidateShape, | |||
| use_resource: args.UseResource); | |||
| return v; | |||
| } | |||
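The weight factory now returns the tf.Variable call directly; the key point is that tf.Variable accepts a Func&lt;Tensor&gt;, so the initializer only runs once the variable's init scope is entered. A hedged stand-alone sketch (shape, dtype and initializer chosen arbitrarily):

```csharp
// Minimal sketch: pass a deferred initializer instead of a concrete tensor.
Func<Tensor> init_val = () => tf.zeros(new TensorShape(3, 3), dtype: TF_DataType.TF_FLOAT);
var kernel = tf.Variable(init_val,
    dtype: TF_DataType.TF_FLOAT,
    shape: new TensorShape(3, 3),
    name: "kernel",
    trainable: true);
```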
| /// <summary> | |||
| @@ -51,6 +51,8 @@ namespace Tensorflow.Benchmark.Leak | |||
| metrics: new[] { "accuracy" }); | |||
| model.fit(inputImages, outLables, batch_size: 32, epochs: 200); | |||
| keras.backend.clear_session(); | |||
| } | |||
| } | |||
| } | |||
| @@ -8,13 +8,13 @@ namespace Tensorflow.Native.UnitTest | |||
| /// `class CApiAttributesTest` | |||
| /// </summary> | |||
| [TestClass] | |||
| public class CApiAttributesTestcs : CApiTest, IDisposable | |||
| public class AttributesTestcs : CApiTest, IDisposable | |||
| { | |||
| private Graph graph_; | |||
| private int counter_; | |||
| private Status s_; | |||
| public CApiAttributesTestcs() | |||
| public AttributesTestcs() | |||
| { | |||
| s_ = new Status(); | |||
| graph_ = new Graph(); | |||
| @@ -11,7 +11,7 @@ namespace Tensorflow.Native.UnitTest | |||
| /// `class CApiColocationTest` | |||
| /// </summary> | |||
| [TestClass] | |||
| public class CApiFunctionTest : CApiTest, IDisposable | |||
| public class FunctionTest : CApiTest, IDisposable | |||
| { | |||
| Graph func_graph_; | |||
| Graph host_graph_; | |||
| @@ -10,7 +10,7 @@ namespace Tensorflow.Native.UnitTest | |||
| /// `class CApiGradientsTest` | |||
| /// </summary> | |||
| [TestClass] | |||
| public class CApiGradientsTest : CApiTest, IDisposable | |||
| public class GradientsTest : CApiTest, IDisposable | |||
| { | |||
| private Graph graph_ = new Graph(); | |||
| private Graph expected_graph_ = new Graph(); | |||
| @@ -48,6 +48,16 @@ namespace TensorFlowNET.UnitTest.Basics | |||
| Assert.AreEqual(11f, (float)v1.numpy()); | |||
| } | |||
| [TestMethod] | |||
| public void Assign3() | |||
| { | |||
| var v1 = tf.Variable(10.0f, name: "v1"); | |||
| var v2 = tf.Variable(v1, name: "v2"); | |||
| Assert.AreEqual(v1.numpy(), v2.numpy()); | |||
| v1.assign(30.0f); | |||
| Assert.AreNotEqual(v1.numpy(), v2.numpy()); | |||
| } | |||
| /// <summary> | |||
| /// Assign tensor to slice of other tensor. | |||
| /// https://www.tensorflow.org/api_docs/python/tf/Variable#__getitem__ | |||