
!9093 Update examples in doc for bijector and distribution classes

From: @shallydeng
Reviewed-by: @zichun_ye,@sunnybeike
Signed-off-by: @sunnybeike
tags/v1.1.0
mindspore-ci-bot committed 5 years ago
parent commit 27e8ede449
13 changed files with 565 additions and 455 deletions
  1. mindspore/nn/probability/bijector/bijector.py (+1, -1)
  2. mindspore/nn/probability/bijector/exp.py (+20, -17)
  3. mindspore/nn/probability/bijector/gumbel_cdf.py (+30, -19)
  4. mindspore/nn/probability/bijector/invert.py (+20, -15)
  5. mindspore/nn/probability/bijector/power_transform.py (+25, -20)
  6. mindspore/nn/probability/bijector/scalar_affine.py (+23, -15)
  7. mindspore/nn/probability/bijector/softplus.py (+26, -15)
  8. mindspore/nn/probability/distribution/bernoulli.py (+77, -66)
  9. mindspore/nn/probability/distribution/exponential.py (+79, -66)
  10. mindspore/nn/probability/distribution/geometric.py (+77, -66)
  11. mindspore/nn/probability/distribution/normal.py (+84, -70)
  12. mindspore/nn/probability/distribution/transformed_distribution.py (+19, -16)
  13. mindspore/nn/probability/distribution/uniform.py (+84, -69)

mindspore/nn/probability/bijector/bijector.py (+1, -1)

@@ -165,7 +165,7 @@ class Bijector(Cell):
     elif value_t.dtype != self.common_dtype:
         raise TypeError(f"{name} should have the same dtype as other arguments.")
     # check if the parameters are casted into float-type tensors
-    validator.check_type_name("dtype", value_t.dtype, mstype.float_type, type(self).__name__)
+    validator.check_type_name(f"dtype of {name}", value_t.dtype, mstype.float_type, type(self).__name__)
     # check if the dtype of the input_parameter agrees with the bijector's dtype
     elif value_t.dtype != self.dtype:
         raise TypeError(f"{name} should have the same dtype as the bijector's dtype.")
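
With this change, a dtype failure names the offending parameter. A minimal sketch of the behavior (illustrative only; GumbelCDF is just one bijector with a tensor parameter, and the exact message text comes from the validator):

import numpy as np
import mindspore
from mindspore import Tensor
import mindspore.nn.probability.bijector as msb

# A non-float parameter should now fail with a message such as
# "dtype of loc ..." rather than the generic "dtype ...".
try:
    msb.GumbelCDF(loc=Tensor(np.array([0]), dtype=mindspore.int32), scale=1.0)
except TypeError as err:
    print(err)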


mindspore/nn/probability/bijector/exp.py (+20, -17)

@@ -28,24 +28,27 @@ class Exp(PowerTransform):
name (str): The name of the Bijector. Default: 'Exp'.

Examples:
>>> # To initialize an Exp bijector.
>>> import mindspore.nn.probability.bijector as msb
>>> n = msb.Exp()
>>> import mindspore
>>> import mindspore.nn as nn
>>> from mindspore import Tensor
>>> import mindspore.context as context
>>> context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
>>>
>>> # To use an Exp bijector in a network.
>>> class net(Cell):
... def __init__(self):
... super(net, self).__init__()
... self.e1 = msb.Exp()
...
... def construct(self, value):
... # Similar calls can be made to other functions
... # by replacing `forward` by the name of the function.
... ans1 = self.e1.forward(value)
... ans2 = self.e1.inverse(value)
... ans3 = self.e1.forward_log_jacobian(value)
... ans4 = self.e1.inverse_log_jacobian(value)
...
>>> # To initialize an Exp bijector.
>>> exp_bijector = nn.probability.bijector.Exp()
>>> value = Tensor([1, 2, 3], dtype=mindspore.float32)
>>> ans1 = exp_bijector.forward(value)
>>> print(ans1)
[ 2.7182817 7.389056 20.085537 ]
>>> ans2 = exp_bijector.inverse(value)
>>> print(ans2)
[0. 0.6931472 1.0986123]
>>> ans3 = exp_bijector.forward_log_jacobian(value)
>>> print(ans3)
[1. 2. 3.]
>>> ans4 = exp_bijector.inverse_log_jacobian(value)
>>> print(ans4)
[-0. -0.6931472 -1.0986123]
"""

def __init__(self,
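
For reference, the four operations in the new example reduce to elementary functions of Y = exp(X); a NumPy sketch (plain math, not the MindSpore API) that reproduces the printed values:

import numpy as np

value = np.array([1.0, 2.0, 3.0])
print(np.exp(value))    # forward:              ~[ 2.7182817  7.389056  20.085537 ]
print(np.log(value))    # inverse:              ~[0.  0.6931472  1.0986123]
print(value)            # forward_log_jacobian: log|d/dx exp(x)| = x
print(-np.log(value))   # inverse_log_jacobian: log|d/dy log(y)| = -log(y)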


mindspore/nn/probability/bijector/gumbel_cdf.py (+30, -19)

@@ -27,33 +27,44 @@ class GumbelCDF(Bijector):
.. math::
Y = \exp(-\exp(\frac{-(X - loc)}{scale}))

Note:
For `inverse` and `inverse_log_jacobian`, input should be in range of (0, 1).

Args:
loc (float, list, numpy.ndarray, Tensor): The location. Default: 0.0.
scale (float, list, numpy.ndarray, Tensor): The scale. Default: 1.0.
name (str): The name of the Bijector. Default: 'Gumbel_CDF'.

Note:
For `inverse` and `inverse_log_jacobian`, input should be in range of (0, 1).
The dtype of `loc` and `scale` must be float.
If `loc` and `scale` are passed in as numpy.ndarray or Tensor, they must have
the same dtype; otherwise an error will be raised.

Raises:
TypeError: When the dtype of `loc` or `scale` is not float,
or when `loc` and `scale` do not have the same dtype.

Examples:
>>> # To initialize a GumbelCDF bijector of loc 0.0, and scale 1.0.
>>> import mindspore
>>> import mindspore.nn as nn
>>> import mindspore.nn.probability.bijector as msb
>>> gum = msb.GumbelCDF(0.0, 1.0)
>>> from mindspore import Tensor
>>>
>>> # To use GumbelCDF bijector in a network.
>>> class net(Cell):
... def __init__(self):
... super(net, self).__init__()
... self.gum = msb.GumbelCDF(0.0, 1.0)
...
... def construct(self, value):
... # Similar calls can be made to other functions
... # by replacing 'forward' by the name of the function.
... ans1 = self.gum.forward(value)
... ans2 = self.gum.inverse(value)
... ans3 = self.gum.forward_log_jacobian(value)
... ans4 = self.gum.inverse_log_jacobian(value)
...
>>> # To initialize a GumbelCDF bijector of loc 1.0, and scale 2.0.
>>> gumbel_cdf = msb.GumbelCDF(1.0, 2.0)
>>> # To use the GumbelCDF bijector.
>>> x = Tensor([1, 2, 3], dtype=mindspore.float32)
>>> y = Tensor([0.1, 0.2, 0.3], dtype=mindspore.float32)
>>> ans1 = gumbel_cdf.forward(x)
>>> print(ans1)
[0.36787945 0.54523915 0.6922006 ]
>>> ans2 = gumbel_cdf.inverse(y)
>>> print(ans2)
[-0.66806495 0.04822993 0.62874645]
>>> ans3 = gumbel_cdf.forward_log_jacobian(x)
>>> print(ans3)
[-1.6931472 -1.7996778 -2.0610266]
>>> ans4 = gumbel_cdf.inverse_log_jacobian(y)
>>> print(ans4)
[2.1616998 1.8267001 1.7114931]
"""

def __init__(self,
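
For reference, with z = (x - loc) / scale the forward map is exp(-exp(-z)) and the inverse is loc - scale * log(-log(y)); a NumPy sketch (plain math, not the MindSpore API) that approximately reproduces the printed values:

import numpy as np

loc, scale = 1.0, 2.0
x = np.array([1.0, 2.0, 3.0])
z = (x - loc) / scale
print(np.exp(-np.exp(-z)))               # forward: ~[0.36787945 0.5452392  0.6922006 ]
y = np.array([0.1, 0.2, 0.3])
print(loc - scale * np.log(-np.log(y)))  # inverse: ~[-0.66806495 0.04822993 0.62874645]
print(-np.log(scale) - z - np.exp(-z))   # forward_log_jacobian: ~[-1.6931472 -1.7996778 -2.0610266]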


mindspore/nn/probability/bijector/invert.py (+20, -15)

@@ -26,23 +26,28 @@ class Invert(Bijector):
name (str): The name of the Bijector. Default: 'Invert'.

Examples:
>>> # To initialize an Invert bijector.
>>> import mindspore
>>> import mindspore.nn as nn
>>> import mindspore.nn.probability.bijector as msb
>>> n = msb.Invert(msb.Exp())
>>> from mindspore import Tensor
>>> import mindspore.context as context
>>> context.set_context(mode=context.PYNATIVE_MODE)
>>>
>>> # To use an Invert bijector in a network.
>>> class net(Cell):
... def __init__(self):
... super(net, self).__init__()
... self.inv = msb.Invert(msb.Exp())
...
... def construct(self, value):
... # Similar calls can be made to other functions
... # by replacing `forward` by the name of the function.
... ans1 = self.inv.forward(value)
... ans2 = self.inv.inverse(value)
... ans3 = self.inv.forward_log_jacobian(value)
... ans4 = self.inv.inverse_log_jacobian(value)
>>> # To initialize an inverse Exp bijector.
>>> inv_exp = msb.Invert(msb.Exp())
>>> value = Tensor([1, 2, 3], dtype=mindspore.float32)
>>> ans1 = inv_exp.forward(value)
>>> print(ans1)
[0. 0.6931472 1.0986123]
>>> ans2 = inv_exp.inverse(value)
>>> print(ans2)
[ 2.7182817 7.389056 20.085537 ]
>>> ans3 = inv_exp.forward_log_jacobian(value)
>>> print(ans3)
[-0. -0.6931472 -1.0986123]
>>> ans4 = inv_exp.inverse_log_jacobian(value)
>>> print(ans4)
[1. 2. 3.]
"""

def __init__(self,
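
For reference, Invert simply swaps the wrapped bijector's function pairs, which is all the new example exercises; a NumPy sketch (plain math, not the MindSpore API):

import numpy as np

value = np.array([1.0, 2.0, 3.0])
print(np.log(value))   # inv_exp.forward  == Exp().inverse:  ~[0.  0.6931472  1.0986123]
print(np.exp(value))   # inv_exp.inverse  == Exp().forward:  ~[ 2.7182817  7.389056  20.085537 ]
print(-np.log(value))  # inv_exp.forward_log_jacobian == Exp().inverse_log_jacobian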


mindspore/nn/probability/bijector/power_transform.py (+25, -20)

@@ -32,32 +32,37 @@ class PowerTransform(Bijector):

This Bijector is equivalent to the `Exp` bijector when `c=0`.

Raises:
ValueError: When the power is less than 0 or is not known statically.

Args:
power (float, list, numpy.ndarray, Tensor): The scale factor. Default: 0.
name (str): The name of the bijector. Default: 'PowerTransform'.

Note:
The dtype of `power` must be float.

Raises:
ValueError: When `power` is less than 0 or is not known statically.
TypeError: When the dtype of `power` is not float.

Examples:
>>> # To initialize a PowerTransform bijector of power 0.5.
>>> import mindspore
>>> import mindspore.nn as nn
>>> import mindspore.nn.probability.bijector as msb
>>> n = msb.PowerTransform(0.5)
>>>
>>> # To use a PowerTransform bijector in a network.
>>> class net(Cell):
... def __init__(self):
... super(net, self).__init__()
... self.p1 = msb.PowerTransform(0.5)
...
... def construct(self, value):
... # Similar calls can be made to other functions
... # by replacing 'forward' by the name of the function.
... ans1 = self.p1.forward(value)
... ans2 = self.p1.inverse(value)
... ans3 = self.p1.forward_log_jacobian(value)
... ans4 = self.p1.inverse_log_jacobian(value)
...
>>> from mindspore import Tensor
>>> # To initialize a PowerTransform bijector of power 0.5.
>>> powertransform = msb.PowerTransform(0.5)
>>> value = Tensor([1, 2, 3], dtype=mindspore.float32)
>>> ans1 = powertransform.forward(value)
>>> print(ans1)
[2.25 4. 6.25]
>>> ans2 = powertransform.inverse(value)
>>> print(ans2)
[0. 0.82842714 1.4641017 ]
>>> ans3 = powertransform.forward_log_jacobian(value)
>>> print(ans3)
[0.40546513 0.6931472 0.91629076]
>>> ans4 = powertransform.inverse_log_jacobian(value)
>>> print(ans4)
[-0. -0.3465736 -0.54930615]
"""

def __init__(self,
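
For reference, PowerTransform computes Y = (1 + X * c)^(1/c); with c = 0.5 the printed values follow directly, as in this NumPy sketch (plain math, not the MindSpore API):

import numpy as np

c = 0.5
x = np.array([1.0, 2.0, 3.0])
print((1.0 + x * c) ** (1.0 / c))             # forward: ~[2.25 4. 6.25]
print((x ** c - 1.0) / c)                     # inverse: ~[0. 0.82842714 1.4641017]
print((1.0 / c - 1.0) * np.log(1.0 + x * c))  # forward_log_jacobian: ~[0.40546513 0.6931472 0.91629076]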


mindspore/nn/probability/bijector/scalar_affine.py (+23, -15)

@@ -33,26 +33,34 @@ class ScalarAffine(Bijector):
name (str): The name of the bijector. Default: 'ScalarAffine'.

Note:
The dtype of `shift` and `scale` must be float.
If `shift` and `scale` are passed in as numpy.ndarray or Tensor, they must have
the same dtype; otherwise an error will be raised.

Raises:
TypeError: When the dtype of `shift` or `scale` is not float,
or when `shift` and `scale` do not have the same dtype.

Examples:
>>> # To initialize a ScalarAffine bijector of scale 1 and shift 2.
>>> scalaraffine = nn.probability.bijector.ScalarAffine(1, 2)
>>> import mindspore
>>> import mindspore.nn as nn
>>> from mindspore import Tensor
>>>
>>> # To use a ScalarAffine bijector in a network.
>>> class net(Cell):
... def __init__(self):
... super(net, self).__init__()
... self.s1 = nn.probability.bijector.ScalarAffine(1, 2)
...
... def construct(self, value):
... # Similar calls can be made to other functions
... # by replacing 'forward' by the name of the function.
... ans1 = self.s1.forward(value)
... ans2 = self.s1.inverse(value)
... ans3 = self.s1.forward_log_jacobian(value)
... ans4 = self.s1.inverse_log_jacobian(value)
>>> # To initialize a ScalarAffine bijector of scale 1.0 and shift 2.
>>> scalaraffine = nn.probability.bijector.ScalarAffine(1.0, 2.0)
>>> value = Tensor([1, 2, 3], dtype=mindspore.float32)
>>> ans1 = scalaraffine.forward(value)
>>> print(ans1)
[3. 4. 5.]
>>> ans2 = scalaraffine.inverse(value)
>>> print(ans2)
[-1. 0. 1.]
>>> ans3 = scalaraffine.forward_log_jacobian(value)
>>> print(ans3)
0.0
>>> ans4 = scalaraffine.inverse_log_jacobian(value)
>>> print(ans4)
-0.0
"""



mindspore/nn/probability/bijector/softplus.py (+26, -15)

@@ -33,23 +33,34 @@ class Softplus(Bijector):
sharpness (float, list, numpy.ndarray, Tensor): The scale factor. Default: 1.0.
name (str): The name of the Bijector. Default: 'Softplus'.

Note:
The dtype of `sharpness` must be float.

Raises:
TypeError: When the dtype of `sharpness` is not float.

Examples:
>>> # To initialize a Softplus bijector of sharpness 2.
>>> softplus = nn.probability.bijector.Softplus(2)
>>> import mindspore
>>> import mindspore.nn as nn
>>> import mindspore.nn.probability.bijector as msb
>>> from mindspore import Tensor
>>>
>>> # To use a Softplus bijector in a network.
>>> class net(Cell):
... def __init__(self):
... super(net, self).__init__()
... self.sp1 = nn.probability.bijector.Softplus(2.)
...
... def construct(self, value):
... # Similar calls can be made to other functions
... # by replacing 'forward' by the name of the function.
... ans1 = self.sp1.forward(value)
... ans2 = self.sp1.inverse(value)
... ans3 = self.sp1.forward_log_jacobian(value)
... ans4 = self.sp1.inverse_log_jacobian(value)
>>> # To initialize a Softplus bijector of sharpness 2.0.
>>> softplus = msb.Softplus(2.0)
>>> # To use the Softplus bijector.
>>> value = Tensor([1, 2, 3], dtype=mindspore.float32)
>>> ans1 = softplus.forward(value)
>>> print(ans1)
[1.063464 2.009075 3.0012379]
>>> ans2 = softplus.inverse(value)
>>> print(ans2)
[0.9272933 1.9907573 2.998759 ]
>>> ans3 = softplus.forward_log_jacobian(value)
>>> print(ans3)
[-0.12692806 -0.01814996 -0.00247564]
>>> ans4 = softplus.inverse_log_jacobian(value)
>>> print(ans4)
[0.1454134 0.01848531 0.00248194]
"""

def __init__(self,
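
For reference, Softplus with sharpness k computes Y = log(1 + exp(k * X)) / k; a NumPy sketch (plain math, not the MindSpore API) that approximately reproduces the printed values:

import numpy as np

k = 2.0  # sharpness
x = np.array([1.0, 2.0, 3.0])
print(np.log1p(np.exp(k * x)) / k)       # forward: ~[1.063464 2.009075 3.0012379]
print(np.log(np.expm1(k * x)) / k)       # inverse: ~[0.9272933 1.9907573 2.998759 ]
print(k * x - np.log1p(np.exp(k * x)))   # forward_log_jacobian: ~[-0.12692806 -0.01814996 -0.00247564]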


mindspore/nn/probability/distribution/bernoulli.py (+77, -66)

@@ -37,75 +37,86 @@ class Bernoulli(Distribution):
`dist_spec_args` is `probs`.

Examples:
>>> # To initialize a Bernoulli distribution of the probability 0.5.
>>> import mindspore
>>> import mindspore.nn as nn
>>> import mindspore.nn.probability.distribution as msd
>>> b = msd.Bernoulli(0.5, dtype=mstype.int32)
>>>
>>> # The following creates two independent Bernoulli distributions.
>>> b = msd.Bernoulli([0.5, 0.5], dtype=mstype.int32)
>>>
>>> from mindspore import Tensor
>>> # To initialize a Bernoulli distribution of the probability 0.5.
>>> b1 = msd.Bernoulli(0.5, dtype=mindspore.int32)
>>> # A Bernoulli distribution can be initialized without arguments.
>>> # In this case, `probs` must be passed in through arguments during function calls.
>>> b = msd.Bernoulli(dtype=mstype.int32)
>>>
>>> # To use the Bernoulli distribution in a network.
>>> class net(Cell):
... def __init__(self):
... super(net, self).__init__()
... self.b1 = msd.Bernoulli(0.5, dtype=mstype.int32)
... self.b2 = msd.Bernoulli(dtype=mstype.int32)
...
... # All the following calls in construct are valid.
... def construct(self, value, probs_b, probs_a):
...
... # Private interfaces of probability functions corresponding to public interfaces, including
... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
... # Args:
... # value (Tensor): the value to be evaluated.
... # probs1 (Tensor): the probability of success. Default: self.probs.
...
... # Examples of `prob`.
... # Similar calls can be made to other probability functions
... # by replacing `prob` by the name of the function.
... ans = self.b1.prob(value)
... # Evaluate `prob` with respect to distribution b.
... ans = self.b1.prob(value, probs_b)
... # `probs` must be passed in during function calls.
... ans = self.b2.prob(value, probs_a)
...
...
... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
... # Args:
... # probs1 (Tensor): the probability of success. Default: self.probs.
...
... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
... ans = self.b1.mean() # return 0.5
... ans = self.b1.mean(probs_b) # return probs_b
... # `probs` must be passed in during function calls.
... ans = self.b2.mean(probs_a)
...
...
... # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
... # Args:
... # dist (str): the name of the distribution. Only 'Bernoulli' is supported.
... # probs1_b (Tensor): the probability of success of distribution b.
... # probs1_a (Tensor): the probability of success of distribution a. Default: self.probs.
...
... # Examples of kl_loss. `cross_entropy` is similar.
... ans = self.b1.kl_loss('Bernoulli', probs_b)
... ans = self.b1.kl_loss('Bernoulli', probs_b, probs_a)
... # An additional `probs_a` must be passed in.
... ans = self.b2.kl_loss('Bernoulli', probs_b, probs_a)
...
...
... # Examples of `sample`.
... # Args:
... # shape (tuple): the shape of the sample. Default: ().
... # probs1 (Tensor): the probability of success. Default: self.probs.
... ans = self.b1.sample()
... ans = self.b1.sample((2,3))
... ans = self.b1.sample((2,3), probs_b)
... ans = self.b2.sample((2,3), probs_a)
>>> b2 = msd.Bernoulli(dtype=mindspore.int32)

>>> # Here are some tensors used below for testing
>>> value = Tensor([1, 0, 1], dtype=mindspore.int32)
>>> probs_a = Tensor([0.6], dtype=mindspore.float32)
>>> probs_b = Tensor([0.2, 0.5, 0.4], dtype=mindspore.float32)

>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # probs1 (Tensor): the probability of success. Default: self.probs.
>>> # Examples of `prob`.
>>> # Similar calls can be made to other probability functions
>>> # by replacing `prob` by the name of the function.
>>> ans = b1.prob(value)
>>> print(ans)
[0.5 0.5 0.5]
>>> # Evaluate `prob` with respect to distribution b.
>>> ans = b1.prob(value, probs_b)
>>> print(ans)
[0.2 0.5 0.4]
>>> # `probs` must be passed in during function calls.
>>> ans = b2.prob(value, probs_a)
>>> print(ans)
[0.6 0.4 0.6]
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # probs1 (Tensor): the probability of success. Default: self.probs.
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = b1.mean() # return 0.5
>>> print(ans)
0.5
>>> ans = b1.mean(probs_b) # return probs_b
>>> print(ans)
[0.2 0.5 0.4]
>>> # `probs` must be passed in during function calls.
>>> ans = b2.mean(probs_a)
>>> print(ans)
[0.6]
>>> # Interfaces of `kl_loss` and `cross_entropy` are the same as follows:
>>> # Args:
>>> # dist (str): the name of the distribution. Only 'Bernoulli' is supported.
>>> # probs1_b (Tensor): the probability of success of distribution b.
>>> # probs1_a (Tensor): the probability of success of distribution a. Default: self.probs.
>>> # Examples of kl_loss. `cross_entropy` is similar.
>>> ans = b1.kl_loss('Bernoulli', probs_b)
>>> print(ans)
[0.22314356 0. 0.02041098]
>>> ans = b1.kl_loss('Bernoulli', probs_b, probs_a)
>>> print(ans)
[0.38190854 0.02013553 0.08109301]
>>> # An additional `probs_a` must be passed in.
>>> ans = b2.kl_loss('Bernoulli', probs_b, probs_a)
>>> print(ans)
[0.38190854 0.02013553 0.08109301]
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ().
>>> # probs1 (Tensor): the probability of success. Default: self.probs.
>>> ans = b1.sample()
>>> print(ans.shape)
()
>>> ans = b1.sample((2,3))
>>> print(ans.shape)
(2, 3)
>>> ans = b1.sample((2,3), probs_b)
>>> print(ans.shape)
(2, 3, 3)
>>> ans = b2.sample((2,3), probs_a)
>>> print(ans.shape)
(2, 3, 1)
"""

def __init__(self,
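
For reference, the printed values follow from the Bernoulli closed forms; a NumPy sketch (plain math, not the MindSpore API):

import numpy as np

value = np.array([1.0, 0.0, 1.0])
probs_b = np.array([0.2, 0.5, 0.4])
p_a = 0.5

# pmf: p^x * (1 - p)^(1 - x)
print(probs_b ** value * (1 - probs_b) ** (1 - value))   # [0.2 0.5 0.4]

# KL(a || b) between Bernoulli distributions
kl = p_a * np.log(p_a / probs_b) + (1 - p_a) * np.log((1 - p_a) / (1 - probs_b))
print(kl)   # ~[0.22314356 0. 0.02041098]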


mindspore/nn/probability/distribution/exponential.py (+79, -66)

@@ -39,75 +39,88 @@ class Exponential(Distribution):
`dtype` must be a float type because Exponential distributions are continuous.

Examples:
>>> # To initialize an Exponential distribution of the rate 0.5.
>>> import mindspore
>>> import mindspore.context as context
>>> import mindspore.nn as nn
>>> import mindspore.nn.probability.distribution as msd
>>> e = msd.Exponential(0.5, dtype=mstype.float32)
>>>
>>> # The following creates two independent Exponential distributions.
>>> e = msd.Exponential([0.5, 0.5], dtype=mstype.float32)
>>>
>>> from mindspore import Tensor
>>> context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
>>> # To initialize an Exponential distribution of the rate 0.5.
>>> e1 = msd.Exponential(0.5, dtype=mindspore.float32)
>>> # An Exponential distribution can be initialized without arguments.
>>> # In this case, `rate` must be passed in through `args` during function calls.
>>> e = msd.Exponential(dtype=mstype.float32)
>>>
>>> # To use an Exponential distribution in a network.
>>> class net(Cell):
... def __init__(self):
... super(net, self).__init__()
... self.e1 = msd.Exponential(0.5, dtype=mstype.float32)
... self.e2 = msd.Exponential(dtype=mstype.float32)
...
... # All the following calls in construct are valid.
... def construct(self, value, rate_b, rate_a):
...
... # Private interfaces of probability functions corresponding to public interfaces, including
... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
... # Args:
... # value (Tensor): the value to be evaluated.
... # rate (Tensor): the rate of the distribution. Default: self.rate.
...
... # Examples of `prob`.
... # Similar calls can be made to other probability functions
... # by replacing `prob` by the name of the function.
... ans = self.e1.prob(value)
... # Evaluate with respect to distribution b.
... ans = self.e1.prob(value, rate_b)
... # `rate` must be passed in during function calls.
... ans = self.e2.prob(value, rate_a)
...
...
... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments as follows.
... # Args:
... # rate (Tensor): the rate of the distribution. Default: self.rate.
...
... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
... ans = self.e1.mean() # return 2
... ans = self.e1.mean(rate_b) # return 1 / rate_b
... # `rate` must be passed in during function calls.
... ans = self.e2.mean(rate_a)
...
...
... # Interfaces of `kl_loss` and `cross_entropy` are the same.
... # Args:
... # dist (str): The name of the distribution. Only 'Exponential' is supported.
... # rate_b (Tensor): the rate of distribution b.
... # rate_a (Tensor): the rate of distribution a. Default: self.rate.
...
... # Examples of `kl_loss`. `cross_entropy` is similar.
... ans = self.e1.kl_loss('Exponential', rate_b)
... ans = self.e1.kl_loss('Exponential', rate_b, rate_a)
... # An additional `rate` must be passed in.
... ans = self.e2.kl_loss('Exponential', rate_b, rate_a)
...
...
... # Examples of `sample`.
... # Args:
... # shape (tuple): the shape of the sample. Default: ()
... # rate (Tensor): the rate of the distribution. Default: self.rate.
... ans = self.e1.sample()
... ans = self.e1.sample((2,3))
... ans = self.e1.sample((2,3), rate_b)
... ans = self.e2.sample((2,3), rate_a)
>>> e2 = msd.Exponential(dtype=mindspore.float32)

>>> # Here are some tensors used below for testing
>>> value = Tensor([1, 2, 3], dtype=mindspore.float32)
>>> rate_a = Tensor([0.6], dtype=mindspore.float32)
>>> rate_b = Tensor([0.2, 0.5, 0.4], dtype=mindspore.float32)

>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, are the same as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # rate (Tensor): the rate of the distribution. Default: self.rate.
>>> # Examples of `prob`.
>>> # Similar calls can be made to other probability functions
>>> # by replacing `prob` by the name of the function.
>>> ans = e1.prob(value)
>>> print(ans)
[0.30326533 0.18393973 0.11156508]
>>> # Evaluate with respect to distribution b.
>>> ans = e1.prob(value, rate_b)
>>> print(ans)
[0.16374613 0.18393973 0.12047768]
>>> # `rate` must be passed in during function calls.
>>> ans = e2.prob(value, rate_a)
>>> print(ans)
[0.329287 0.18071651 0.09917933]
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments as follows.
>>> # Args:
>>> # rate (Tensor): the rate of the distribution. Default: self.rate.
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = e1.mean() # return 2
>>> print(ans)
2.0
>>> ans = e1.mean(rate_b) # return 1 / rate_b
>>> print(ans)
[5. 2. 2.5]
>>> # `rate` must be passed in during function calls.
>>> ans = e2.mean(rate_a)
>>> print(ans)
[1.6666666]
>>> # Interfaces of `kl_loss` and `cross_entropy` are the same.
>>> # Args:
>>> # dist (str): The name of the distribution. Only 'Exponential' is supported.
>>> # rate_b (Tensor): the rate of distribution b.
>>> # rate_a (Tensor): the rate of distribution a. Default: self.rate.
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = e1.kl_loss('Exponential', rate_b)
>>> print(ans)
[0.31629074 0. 0.02314353]
>>> ans = e1.kl_loss('Exponential', rate_b, rate_a)
>>> print(ans)
[0.43194556 0.01565492 0.07213175]
>>> # An additional `rate` must be passed in.
>>> ans = e2.kl_loss('Exponential', rate_b, rate_a)
>>> print(ans)
[0.43194556 0.01565492 0.07213175]
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()
>>> # rate (Tensor): the rate of the distribution. Default: self.rate.
>>> ans = e1.sample()
>>> print(ans.shape)
()
>>> ans = e1.sample((2,3))
>>> print(ans.shape)
(2, 3)
>>> ans = e1.sample((2,3), rate_b)
>>> print(ans.shape)
(2, 3, 3)
>>> ans = e2.sample((2,3), rate_a)
>>> print(ans.shape)
(2, 3, 1)
"""

def __init__(self,
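
For reference, the printed values follow from the Exponential closed forms; a NumPy sketch (plain math, not the MindSpore API):

import numpy as np

value = np.array([1.0, 2.0, 3.0])
rate_a, rate_b = 0.5, np.array([0.2, 0.5, 0.4])

print(rate_a * np.exp(-rate_a * value))   # pdf:  ~[0.30326533 0.18393973 0.11156508]
print(1.0 / rate_a)                       # mean: 2.0
# KL(a || b) = log(rate_a / rate_b) + rate_b / rate_a - 1
print(np.log(rate_a / rate_b) + rate_b / rate_a - 1.0)   # ~[0.31629074 0. 0.02314353]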


mindspore/nn/probability/distribution/geometric.py (+77, -66)

@@ -40,75 +40,86 @@ class Geometric(Distribution):
`dist_spec_args` is `probs`.

Examples:
>>> # To initialize a Geometric distribution of the probability 0.5.
>>> import mindspore
>>> import mindspore.nn as nn
>>> import mindspore.nn.probability.distribution as msd
>>> n = msd.Geometric(0.5, dtype=mstype.int32)
>>>
>>> # The following creates two independent Geometric distributions.
>>> n = msd.Geometric([0.5, 0.5], dtype=mstype.int32)
>>>
>>> # A Geometric distribution can be initialized without arguments.
>>> from mindspore import Tensor
>>> # To initialize a Geometric distribution of the probability 0.5.
>>> g1 = msd.Geometric(0.5, dtype=mindspore.int32)
>>> # A Geometric distribution can be initialized without arguments.
>>> # In this case, `probs` must be passed in through arguments during function calls.
>>> n = msd.Geometric(dtype=mstype.int32)
>>> g2 = msd.Geometric(dtype=mindspore.int32)
>>>
>>> # Here are some tensors used below for testing
>>> value = Tensor([1, 0, 1], dtype=mindspore.int32)
>>> probs_a = Tensor([0.6], dtype=mindspore.float32)
>>> probs_b = Tensor([0.2, 0.5, 0.4], dtype=mindspore.float32)
>>>
>>> # To use a Geometric distribution in a network.
>>> class net(Cell):
... def __init__(self):
... super(net, self).__init__()
... self.g1 = msd.Geometric(0.5, dtype=mstype.int32)
... self.g2 = msd.Geometric(dtype=mstype.int32)
...
... # The following calls are valid in construct.
... def construct(self, value, probs_b, probs_a):
...
... # Private interfaces of probability functions corresponding to public interfaces, including
... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
... # Args:
... # value (Tensor): the value to be evaluated.
... # probs1 (Tensor): the probability of success of a Bernoulli trial. Default: self.probs.
...
... # Examples of `prob`.
... # Similar calls can be made to other probability functions
... # by replacing `prob` by the name of the function.
... ans = self.g1.prob(value)
... # Evaluate with respect to distribution b.
... ans = self.g1.prob(value, probs_b)
... # `probs` must be passed in during function calls.
... ans = self.g2.prob(value, probs_a)
...
...
... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
... # Args:
... # probs1 (Tensor): the probability of success of a Bernoulli trial. Default: self.probs.
...
... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
... ans = self.g1.mean() # return 1.0
... ans = self.g1.mean(probs_b)
... # Probs must be passed in during function calls
... ans = self.g2.mean(probs_a)
...
...
... # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
... # Args:
... # dist (str): the name of the distribution. Only 'Geometric' is supported.
... # probs1_b (Tensor): the probability of success of a Bernoulli trial of distribution b.
... # probs1_a (Tensor): the probability of success of a Bernoulli trial of distribution a. Default: self.probs.
...
... # Examples of `kl_loss`. `cross_entropy` is similar.
... ans = self.g1.kl_loss('Geometric', probs_b)
... ans = self.g1.kl_loss('Geometric', probs_b, probs_a)
... # An additional `probs` must be passed in.
... ans = self.g2.kl_loss('Geometric', probs_b, probs_a)
...
...
... # Examples of `sample`.
... # Args:
... # shape (tuple): the shape of the sample. Default: ()
... # probs1 (Tensor): the probability of success of a Bernoulli trial. Default: self.probs.
... ans = self.g1.sample()
... ans = self.g1.sample((2,3))
... ans = self.g1.sample((2,3), probs_b)
... ans = self.g2.sample((2,3), probs_a)
>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # probs1 (Tensor): the probability of success of a Bernoulli trial. Default: self.probs.
>>> # Examples of `prob`.
>>> # Similar calls can be made to other probability functions
>>> # by replacing `prob` by the name of the function.
>>> ans = g1.prob(value)
>>> print(ans)
[0.25 0.5 0.25]
>>> # Evaluate with respect to distribution b.
>>> ans = g1.prob(value, probs_b)
>>> print(ans)
[0.16 0.5 0.24]
>>> # `probs` must be passed in during function calls.
>>> ans = g2.prob(value, probs_a)
>>> print(ans)
[0.24 0.6 0.24]
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # probs1 (Tensor): the probability of success of a Bernoulli trial. Default: self.probs.
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = g1.mean() # return 1.0
>>> print(ans)
1.0
>>> ans = g1.mean(probs_b)
>>> print(ans)
[4. 1. 1.5]
>>> # Probs must be passed in during function calls
>>> ans = g2.mean(probs_a)
>>> print(ans)
[0.6666666]
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
>>> # Args:
>>> # dist (str): the name of the distribution. Only 'Geometric' is supported.
>>> # probs1_b (Tensor): the probability of success of a Bernoulli trial of distribution b.
>>> # probs1_a (Tensor): the probability of success of a Bernoulli trial of distribution a. Default: self.probs.
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = g1.kl_loss('Geometric', probs_b)
>>> print(ans)
[0.44628713 0. 0.04082197]
>>> ans = g1.kl_loss('Geometric', probs_b, probs_a)
>>> print(ans)
[0.6365142 0.0335592 0.13515502]
>>> # An additional `probs` must be passed in.
>>> ans = g2.kl_loss('Geometric', probs_b, probs_a)
>>> print(ans)
[0.6365142 0.0335592 0.13515502]
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()
>>> # probs1 (Tensor): the probability of success of a Bernoulli trial. Default: self.probs.
>>> ans = g1.sample()
>>> print(ans.shape)
()
>>> ans = g1.sample((2,3))
>>> print(ans.shape)
(2, 3)
>>> ans = g1.sample((2,3), probs_b)
>>> print(ans.shape)
(2, 3, 3)
>>> ans = g2.sample((2,3), probs_a)
>>> print(ans.shape)
(2, 3, 1)
"""

def __init__(self,
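
For reference, here the Geometric distribution counts failures before the first success; a NumPy sketch of the closed forms (plain math, not the MindSpore API):

import numpy as np

value = np.array([1.0, 0.0, 1.0])
p_a, probs_b = 0.5, np.array([0.2, 0.5, 0.4])

print(p_a * (1.0 - p_a) ** value)    # pmf:  [0.25 0.5 0.25]
print((1.0 - p_a) / p_a)             # mean: 1.0
# KL(a || b) = log(p_a / p_b) + (1 - p_a) / p_a * log((1 - p_a) / (1 - p_b))
print(np.log(p_a / probs_b) + (1 - p_a) / p_a * np.log((1 - p_a) / (1 - probs_b)))
# ~[0.44628713 0. 0.04082197]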


mindspore/nn/probability/distribution/normal.py (+84, -70)

@@ -40,79 +40,93 @@ class Normal(Distribution):
`dtype` must be a float type because Normal distributions are continuous.

Examples:
>>> # To initialize a Normal distribution of the mean 3.0 and the standard deviation 4.0.
>>> import mindspore
>>> import mindspore.nn as nn
>>> import mindspore.nn.probability.distribution as msd
>>> n = msd.Normal(3.0, 4.0, dtype=mstype.float32)
>>>
>>> # The following creates two independent Normal distributions.
>>> n = msd.Normal([3.0, 3.0], [4.0, 4.0], dtype=mstype.float32)
>>>
>>> from mindspore import Tensor
>>> # To initialize a Normal distribution of the mean 3.0 and the standard deviation 4.0.
>>> n1 = msd.Normal(3.0, 4.0, dtype=mindspore.float32)
>>> # A Normal distribution can be initialized without arguments.
>>> # In this case, `mean` and `sd` must be passed in through arguments.
>>> n = msd.Normal(dtype=mstype.float32)
>>>
>>> # To use a Normal distribution in a network.
>>> class net(Cell):
... def __init__(self):
... super(net, self).__init__()
... self.n1 = msd.Normal(0.0, 1.0, dtype=mstype.float32)
... self.n2 = msd.Normal(dtype=mstype.float32)
...
... # The following calls are valid in construct.
... def construct(self, value, mean_b, sd_b, mean_a, sd_a):
...
... # Private interfaces of probability functions corresponding to public interfaces, including
... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
... # Args:
... # value (Tensor): the value to be evaluated.
... # mean (Tensor): the mean of the distribution. Default: self._mean_value.
... # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value.
...
... # Examples of `prob`.
... # Similar calls can be made to other probability functions
... # by replacing 'prob' by the name of the function
... ans = self.n1.prob(value)
... # Evaluate with respect to the distribution b.
... ans = self.n1.prob(value, mean_b, sd_b)
... # `mean` and `sd` must be passed in during function calls
... ans = self.n2.prob(value, mean_a, sd_a)
...
...
... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
... # Args:
... # mean (Tensor): the mean of the distribution. Default: self._mean_value.
... # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value.
...
... # Example of `mean`. `sd`, `var`, and `entropy` are similar.
... ans = self.n1.mean() # return 0.0
... ans = self.n1.mean(mean_b, sd_b) # return mean_b
... # `mean` and `sd` must be passed in during function calls.
... ans = self.n2.mean(mean_a, sd_a)
...
...
... # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
... # Args:
... # dist (str): the type of the distributions. Only "Normal" is supported.
... # mean_b (Tensor): the mean of distribution b.
... # sd_b (Tensor): the standard deviation of distribution b.
... # mean_a (Tensor): the mean of distribution a. Default: self._mean_value.
... # sd_a (Tensor): the standard deviation of distribution a. Default: self._sd_value.
...
... # Examples of `kl_loss`. `cross_entropy` is similar.
... ans = self.n1.kl_loss('Normal', mean_b, sd_b)
... ans = self.n1.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a)
... # Additional `mean` and `sd` must be passed in.
... ans = self.n2.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a)
...
... # Examples of `sample`.
... # Args:
... # shape (tuple): the shape of the sample. Default: ()
... # mean (Tensor): the mean of the distribution. Default: self._mean_value.
... # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value.
... ans = self.n1.sample()
... ans = self.n1.sample((2,3))
... ans = self.n1.sample((2,3), mean_b, sd_b)
... ans = self.n2.sample((2,3), mean_a, sd_a)
>>> n2 = msd.Normal(dtype=mindspore.float32)

>>> # Here are some tensors used below for testing
>>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> mean_a = Tensor([2.0], dtype=mindspore.float32)
>>> sd_a = Tensor([2.0, 2.0, 2.0], dtype=mindspore.float32)
>>> mean_b = Tensor([1.0], dtype=mindspore.float32)
>>> sd_b = Tensor([1.0, 1.5, 2.0], dtype=mindspore.float32)

>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # mean (Tensor): the mean of the distribution. Default: self._mean_value.
>>> # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value.
>>> # Examples of `prob`.
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function
>>> ans = n1.prob(value)
>>> print(ans)
[0.08801632 0.09666702 0.09973556]
>>> # Evaluate with respect to the distribution b.
>>> ans = n1.prob(value, mean_b, sd_b)
>>> print(ans)
[0.3989423 0.21296532 0.12098535]
>>> # `mean` and `sd` must be passed in during function calls
>>> ans = n2.prob(value, mean_a, sd_a)
>>> print(ans)
[0.17603266 0.19947115 0.17603266]
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # mean (Tensor): the mean of the distribution. Default: self._mean_value.
>>> # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value.
>>> # Example of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = n1.mean() # return 3.0
>>> print(ans)
3.0
>>> ans = n1.mean(mean_b, sd_b) # return mean_b
>>> print(ans)
[1. 1. 1.]
>>> # `mean` and `sd` must be passed in during function calls.
>>> ans = n2.mean(mean_a, sd_a)
>>> print(ans)
[2. 2. 2.]
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
>>> # Args:
>>> # dist (str): the type of the distributions. Only "Normal" is supported.
>>> # mean_b (Tensor): the mean of distribution b.
>>> # sd_b (Tensor): the standard deviation of distribution b.
>>> # mean_a (Tensor): the mean of distribution a. Default: self._mean_value.
>>> # sd_a (Tensor): the standard deviation of distribution a. Default: self._sd_value.
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = n1.kl_loss('Normal', mean_b, sd_b)
>>> print(ans)
[8.113706 2.963615 1.3068528]
>>> ans = n1.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a)
>>> print(ans)
[1.3068528 0.32342905 0.125 ]
>>> # Additional `mean` and `sd` must be passed in.
>>> ans = n2.kl_loss('Normal', mean_b, sd_b, mean_a, sd_a)
>>> print(ans)
[1.3068528 0.32342905 0.125 ]
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()
>>> # mean (Tensor): the mean of the distribution. Default: self._mean_value.
>>> # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value.
>>> ans = n1.sample()
>>> print(ans.shape)
()
>>> ans = n1.sample((2,3))
>>> print(ans.shape)
(2, 3)
>>> ans = n1.sample((2,3), mean_b, sd_b)
>>> print(ans.shape)
(2, 3, 3)
>>> ans = n2.sample((2,3), mean_a, sd_a)
>>> print(ans.shape)
(2, 3, 3)
"""

def __init__(self,
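
For reference, the printed values follow from the Normal closed forms; a NumPy sketch (plain math, not the MindSpore API):

import numpy as np

value = np.array([1.0, 2.0, 3.0])
mean_a, sd_a = 3.0, 4.0
mean_b, sd_b = 1.0, np.array([1.0, 1.5, 2.0])

# pdf of N(mean_a, sd_a^2)
print(np.exp(-0.5 * ((value - mean_a) / sd_a) ** 2) / (sd_a * np.sqrt(2 * np.pi)))
# ~[0.08801632 0.09666702 0.09973556]

# KL(a || b) = log(sd_b / sd_a) + (sd_a^2 + (mean_a - mean_b)^2) / (2 * sd_b^2) - 1/2
print(np.log(sd_b / sd_a) + (sd_a ** 2 + (mean_a - mean_b) ** 2) / (2 * sd_b ** 2) - 0.5)
# ~[8.113706  2.963615  1.3068528]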


mindspore/nn/probability/distribution/transformed_distribution.py (+19, -16)

@@ -49,24 +49,27 @@ class TransformedDistribution(Distribution):
`reset_parameters` followed by `add_parameter`.

Examples:
>>> # To initialize a transformed distribution, e.g. a lognormal distribution,
>>> # using a Normal distribution as the base distribution, and an Exp bijector as the bijector function.
>>> import mindspore
>>> import mindspore.context as context
>>> import mindspore.nn as nn
>>> import mindspore.nn.probability.distribution as msd
>>> import mindspore.nn.probability.bijector as msb
>>> ln = msd.TransformedDistribution(msb.Exp(),
... msd.Normal(0.0, 1.0, dtype=mstype.float32))
...
>>> # To use a transformed distribution in a network.
>>> class net(Cell):
... def __init__(self):
... super(net, self).__init__()
... self.ln = msd.TransformedDistribution(msb.Exp(),
... msd.Normal(0.0, 1.0, dtype=mstype.float32))
...
... def construct(self, value):
... # Similar calls can be made to other functions
... # by replacing 'sample' by the name of the function.
... ans = self.ln.sample(shape=(2, 3))
>>> from mindspore import Tensor
>>> context.set_context(mode=context.PYNATIVE_MODE)
>>>
>>> # To initialize a transformed distribution
>>> # using a Normal distribution as the base distribution,
>>> # and an Exp bijector as the bijector function.
>>> trans_dist = msd.TransformedDistribution(msb.Exp(),
...                                          msd.Normal(0.0, 1.0))
>>>
>>> value = Tensor([1.0, 2.0, 3.0], dtype=mindspore.float32)
>>> prob = trans_dist.prob(value)
>>> print(prob)
[0.3989423 0.15687403 0.07272825]
>>> sample = trans_dist.sample(shape=(2, 3))
>>> print(sample.shape)
(2, 3)
"""

def __init__(self,
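
For reference, the transformed density is the base density evaluated at the inverse image times the inverse's Jacobian; for Exp over Normal(0.0, 1.0) this is the log-normal pdf, as in this NumPy sketch (plain math, not the MindSpore API):

import numpy as np

value = np.array([1.0, 2.0, 3.0])
x = np.log(value)                                    # bijector inverse
base_pdf = np.exp(-0.5 * x ** 2) / np.sqrt(2 * np.pi)
print(base_pdf / value)                              # ~[0.3989423 0.15687403 0.07272825]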


mindspore/nn/probability/distribution/uniform.py (+84, -69)

@@ -39,79 +39,94 @@ class Uniform(Distribution):
`dtype` must be float type because Uniform distributions are continuous.

Examples:
>>> # To initialize a Uniform distribution of the lower bound 0.0 and the higher bound 1.0.
>>> import mindspore
>>> import mindspore.context as context
>>> import mindspore.nn as nn
>>> import mindspore.nn.probability.distribution as msd
>>> u = msd.Uniform(0.0, 1.0, dtype=mstype.float32)
>>>
>>> # The following creates two independent Uniform distributions.
>>> u = msd.Uniform([0.0, 0.0], [1.0, 2.0], dtype=mstype.float32)
>>>
>>> from mindspore import Tensor
>>> context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
>>> # To initialize a Uniform distribution of the lower bound 0.0 and the higher bound 1.0.
>>> u1 = msd.Uniform(0.0, 1.0, dtype=mindspore.float32)
>>> # A Uniform distribution can be initialized without arguments.
>>> # In this case, `high` and `low` must be passed in through arguments during function calls.
>>> u = msd.Uniform(dtype=mstype.float32)
>>> u2 = msd.Uniform(dtype=mindspore.float32)
>>>
>>> # To use a Uniform distribution in a network.
>>> class net(Cell):
... def __init__(self):
... super(net, self).__init__()
... self.u1 = msd.Uniform(0.0, 1.0, dtype=mstype.float32)
... self.u2 = msd.Uniform(dtype=mstype.float32)
...
... # All the following calls in construct are valid.
... def construct(self, value, low_b, high_b, low_a, high_a):
...
... # Private interfaces of probability functions corresponding to public interfaces, including
... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments.
... # Args:
... # value (Tensor): the value to be evaluated.
... # low (Tensor): the lower bound of the distribution. Default: self.low.
... # high (Tensor): the higher bound of the distribution. Default: self.high.
...
... # Examples of `prob`.
... # Similar calls can be made to other probability functions
... # by replacing 'prob' by the name of the function.
... ans = self.u1.prob(value)
... # Evaluate with respect to distribution b.
... ans = self.u1.prob(value, low_b, high_b)
... # `high` and `low` must be passed in during function calls.
... ans = self.u2.prob(value, low_a, high_a)
...
...
... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
... # Args:
... # low (Tensor): the lower bound of the distribution. Default: self.low.
... # high (Tensor): the higher bound of the distribution. Default: self.high.
...
... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
... ans = self.u1.mean() # return 0.5
... ans = self.u1.mean(low_b, high_b) # return (low_b + high_b) / 2
... # `high` and `low` must be passed in during function calls.
... ans = self.u2.mean(low_a, high_a)
...
... # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
... # Args:
... # dist (str): the type of the distributions. Should be "Uniform" in this case.
... # low_b (Tensor): the lower bound of distribution b.
... # high_b (Tensor): the upper bound of distribution b.
... # low_a (Tensor): the lower bound of distribution a. Default: self.low.
... # high_a (Tensor): the upper bound of distribution a. Default: self.high.
...
... # Examples of `kl_loss`. `cross_entropy` is similar.
... ans = self.u1.kl_loss('Uniform', low_b, high_b)
... ans = self.u1.kl_loss('Uniform', low_b, high_b, low_a, high_a)
... # Additional `high` and `low` must be passed in.
... ans = self.u2.kl_loss('Uniform', low_b, high_b, low_a, high_a)
...
...
... # Examples of `sample`.
... # Args:
... # shape (tuple): the shape of the sample. Default: ()
... # low (Tensor): the lower bound of the distribution. Default: self.low.
... # high (Tensor): the upper bound of the distribution. Default: self.high.
... ans = self.u1.sample()
... ans = self.u1.sample((2,3))
... ans = self.u1.sample((2,3), low_b, high_b)
... ans = self.u2.sample((2,3), low_a, high_a)
>>> # Here are some tensors used below for testing
>>> value = Tensor([0.5, 0.8], dtype=mindspore.float32)
>>> low_a = Tensor([0., 0.], dtype=mindspore.float32)
>>> high_a = Tensor([2.0, 4.0], dtype=mindspore.float32)
>>> low_b = Tensor([-1.5], dtype=mindspore.float32)
>>> high_b = Tensor([2.5, 5.], dtype=mindspore.float32)
>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # low (Tensor): the lower bound of the distribution. Default: self.low.
>>> # high (Tensor): the higher bound of the distribution. Default: self.high.
>>> # Examples of `prob`.
>>> # Similar calls can be made to other probability functions
>>> # by replacing 'prob' by the name of the function.
>>> ans = u1.prob(value)
>>> print(ans)
[1. 1.]
>>> # Evaluate with respect to distribution b.
>>> ans = u1.prob(value, low_b, high_b)
>>> print(ans)
[0.25 0.15384614]
>>> # `high` and `low` must be passed in during function calls.
>>> ans = u2.prob(value, low_a, high_a)
>>> print(ans)
[0.5 0.25]
>>> # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
>>> # Args:
>>> # low (Tensor): the lower bound of the distribution. Default: self.low.
>>> # high (Tensor): the higher bound of the distribution. Default: self.high.
>>> # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
>>> ans = u1.mean() # return 0.5
>>> print(ans)
0.5
>>> ans = u1.mean(low_b, high_b) # return (low_b + high_b) / 2
>>> print(ans)
[0.5 1.75]
>>> # `high` and `low` must be passed in during function calls.
>>> ans = u2.mean(low_a, high_a)
>>> print(ans)
[1. 2.]
>>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same.
>>> # Args:
>>> # dist (str): the type of the distributions. Should be "Uniform" in this case.
>>> # low_b (Tensor): the lower bound of distribution b.
>>> # high_b (Tensor): the upper bound of distribution b.
>>> # low_a (Tensor): the lower bound of distribution a. Default: self.low.
>>> # high_a (Tensor): the upper bound of distribution a. Default: self.high.
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = u1.kl_loss('Uniform', low_b, high_b)
>>> print(ans)
[1.3862944 1.8718022]
>>> ans = u1.kl_loss('Uniform', low_b, high_b, low_a, high_a)
>>> print(ans)
[0.6931472 0.48550785]
>>> # Additional `high` and `low` must be passed in.
>>> ans = u2.kl_loss('Uniform', low_b, high_b, low_a, high_a)
>>> print(ans)
[0.6931472 0.48550785]
>>> # Examples of `sample`.
>>> # Args:
>>> # shape (tuple): the shape of the sample. Default: ()
>>> # low (Tensor): the lower bound of the distribution. Default: self.low.
>>> # high (Tensor): the upper bound of the distribution. Default: self.high.
>>> ans = u1.sample()
>>> print(ans.shape)
()
>>> ans = u1.sample((2,3))
>>> print(ans.shape)
(2, 3)
>>> ans = u1.sample((2,3), low_b, high_b)
>>> print(ans.shape)
(2, 3, 2)
>>> ans = u2.sample((2,3), low_a, high_a)
>>> print(ans.shape)
(2, 3, 2)
"""

def __init__(self,
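
For reference, the printed values follow from the Uniform closed forms; a NumPy sketch (plain math, not the MindSpore API):

import numpy as np

low_a, high_a = 0.0, 1.0
low_b, high_b = -1.5, np.array([2.5, 5.0])

print(1.0 / (high_b - low_b))    # pdf under b: [0.25 0.15384614]
print((low_b + high_b) / 2.0)    # mean of b:   [0.5 1.75]
# KL(a || b) = log((high_b - low_b) / (high_a - low_a)) when a's support lies inside b's
print(np.log((high_b - low_b) / (high_a - low_a)))   # ~[1.3862944 1.8718022]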

