
use `open type` for F# readme example

This is the proper way to mimic the C# `using static` code.
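
For context (not part of this commit): `open type`, added in F# 5.0, brings the accessible static members of a type into scope, which is what C#'s `using static` does. A minimal sketch of the equivalence, using `System.Math` purely for illustration:

```fsharp
// C# equivalent:  using static System.Math;
open type System.Math

// Sqrt now resolves without the Math. qualifier.
let hypotenuse a b = Sqrt (a * a + b * b)

printfn "%f" (hypotenuse 3.0 4.0)  // prints 5.000000
```

In the README's case the opened types are `Tensorflow.Binding` and `Tensorflow.KerasApi`, so `New`, `zip`, and `keras` can be used unqualified, mirroring the `using static` directives in the C# example.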
pull/665/head
Nicholas Hirschey, 5 years ago
commit 56297aacf2
1 changed file with 5 additions and 5 deletions

README.md

@@ -169,12 +169,12 @@ Linear Regression in `Eager` mode:
 #r "nuget: SciSharp.TensorFlow.Redist"
 #r "nuget: NumSharp"
 
-open System
 open NumSharp
 open Tensorflow
-open Tensorflow.Keras
+open type Tensorflow.Binding
+open type Tensorflow.KerasApi
 
-let tf = Binding.New<tensorflow>()
+let tf = New<tensorflow>()
 tf.enable_eager_execution()
 
 // Parameters
@@ -194,7 +194,7 @@ let n_samples = train_X.shape.[0]
 // We can set a fixed init value in order to demo
 let W = tf.Variable(-0.06f,name = "weight")
 let b = tf.Variable(-0.73f, name = "bias")
-let optimizer = KerasApi.keras.optimizers.SGD(learning_rate)
+let optimizer = keras.optimizers.SGD(learning_rate)
 
 // Run training for the given number of steps.
 for step = 1 to (training_steps + 1) do
@@ -210,7 +210,7 @@ for step = 1 to (training_steps + 1) do
     let gradients = g.gradient(loss,struct (W,b))
 
     // Update W and b following gradients.
-    optimizer.apply_gradients(Binding.zip(gradients, struct (W,b)))
+    optimizer.apply_gradients(zip(gradients, struct (W,b)))
 
     if (step % display_step) = 0 then
         let pred = W * train_X + b
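Read together, the three hunks make one change: drop the `Binding.` and `KerasApi.` qualifiers by opening those two static classes with `open type`. A condensed, non-runnable view assembled from the `+` lines above (`learning_rate`, `W`, `b`, and `gradients` come from parts of the README example outside this diff):

```fsharp
open NumSharp
open Tensorflow
open type Tensorflow.Binding   // New and zip now resolve unqualified
open type Tensorflow.KerasApi  // keras now resolves unqualified

let tf = New<tensorflow>()     // was: Binding.New<tensorflow>()

// ... training setup elided; see the full README example ...
let optimizer = keras.optimizers.SGD(learning_rate)      // was: KerasApi.keras.optimizers.SGD(...)
optimizer.apply_gradients(zip(gradients, struct (W,b)))  // was: Binding.zip(...)
```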