Browse Source

Clean up the code of the probability module

tags/v1.3.0
zhangxinfeng3 4 years ago
parent
commit
d86465602a
22 changed files with 151 additions and 141 deletions
  1. +46
    -0
      mindspore/nn/probability/bnn_layers/_util.py
  2. +20
    -53
      mindspore/nn/probability/bnn_layers/conv_variational.py
  3. +31
    -59
      mindspore/nn/probability/bnn_layers/dense_variational.py
  4. +5
    -0
      mindspore/nn/probability/bnn_layers/layer_distribution.py
  5. +3
    -2
      mindspore/nn/probability/distribution/_utils/__init__.py
  6. +2
    -0
      mindspore/nn/probability/distribution/_utils/custom_ops.py
  7. +12
    -2
      mindspore/nn/probability/distribution/_utils/utils.py
  8. +2
    -1
      mindspore/nn/probability/distribution/cauchy.py
  9. +2
    -1
      mindspore/nn/probability/distribution/gamma.py
  10. +3
    -2
      mindspore/nn/probability/distribution/geometric.py
  11. +1
    -0
      mindspore/nn/probability/distribution/gumbel.py
  12. +1
    -0
      mindspore/nn/probability/distribution/log_normal.py
  13. +2
    -1
      mindspore/nn/probability/distribution/logistic.py
  14. +2
    -1
      mindspore/nn/probability/distribution/normal.py
  15. +1
    -1
      mindspore/nn/probability/dpn/__init__.py
  16. +1
    -1
      mindspore/nn/probability/infer/__init__.py
  17. +4
    -2
      mindspore/nn/probability/transforms/transform_bnn.py
  18. +2
    -2
      mindspore/nn/probability/zhusuan/__init__.py
  19. +1
    -1
      mindspore/nn/probability/zhusuan/framework/__init__.py
  20. +6
    -8
      mindspore/nn/probability/zhusuan/framework/bn.py
  21. +1
    -1
      mindspore/nn/probability/zhusuan/variational/__init__.py
  22. +3
    -3
      tests/st/probability/zhusuan/vae/vae_mnist.py

+ 46
- 0
mindspore/nn/probability/bnn_layers/_util.py View File

@@ -0,0 +1,46 @@
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utility functions to help bnn layers."""
from mindspore.common.tensor import Tensor
from ...cell import Cell
def check_prior(prior_fn, arg_name):
    """check prior distribution of bnn layers."""
    # `prior_fn` may be an already-instantiated Cell or a factory callable.
    prior = prior_fn if isinstance(prior_fn, Cell) else prior_fn()
    # Only normal distributions with Tensor mean/sd parameters are supported.
    for dist_name, dist in prior.name_cells().items():
        if dist_name != 'normal':
            raise TypeError(f"The type of distribution of `{arg_name}` should be `normal`")
        if not (isinstance(getattr(dist, '_mean_value'), Tensor) and
                isinstance(getattr(dist, '_sd_value'), Tensor)):
            raise TypeError(f"The input form of `{arg_name}` is incorrect")
    return prior
def check_posterior(posterior_fn, shape, param_name, arg_name):
    """check posterior distribution of bnn layers.

    Args:
        posterior_fn: Callable building the posterior; must accept
            ``shape`` and ``name`` keyword arguments.
        shape: Shape of the sampled parameter.
        param_name: Name passed to the posterior constructor.
        arg_name: User-facing argument name used in error messages.

    Returns:
        The constructed posterior distribution.

    Raises:
        TypeError: If ``posterior_fn`` has the wrong signature or the
            resulting distribution is not a normal distribution.
    """
    try:
        posterior = posterior_fn(shape=shape, name=param_name)
    except TypeError as err:
        # Chain the original error so the signature mismatch stays visible.
        raise TypeError(f'The type of `{arg_name}` should be `NormalPosterior`') from err
    for posterior_name, _ in posterior.name_cells().items():
        if posterior_name != 'normal':
            raise TypeError(f"The type of distribution of `{arg_name}` should be `normal`")
    return posterior

+ 20
- 53
mindspore/nn/probability/bnn_layers/conv_variational.py View File

@@ -14,11 +14,10 @@
# ============================================================================
"""Convolutional variational layers."""
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindspore._checkparam import twice
from ...layer.conv import _Conv
from ...cell import Cell
from .layer_distribution import NormalPrior, NormalPosterior
from .layer_distribution import NormalPrior, normal_post_fn
from ._util import check_prior, check_posterior
__all__ = ['ConvReparam']
@@ -39,9 +38,9 @@ class _ConvVariational(_Conv):
group=1,
has_bias=False,
weight_prior_fn=NormalPrior,
weight_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape),
weight_posterior_fn=normal_post_fn,
bias_prior_fn=NormalPrior,
bias_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape)):
bias_posterior_fn=normal_post_fn):
kernel_size = twice(kernel_size)
stride = twice(stride)
dilation = twice(dilation)
@@ -78,47 +77,15 @@ class _ConvVariational(_Conv):
self.in_channels // self.group, *self.kernel_size]
self.weight.requires_grad = False
if isinstance(weight_prior_fn, Cell):
self.weight_prior = weight_prior_fn
else:
self.weight_prior = weight_prior_fn()
for prior_name, prior_dist in self.weight_prior.name_cells().items():
if prior_name != 'normal':
raise TypeError("The type of distribution of `weight_prior_fn` should be `normal`")
if not (isinstance(getattr(prior_dist, '_mean_value'), Tensor) and
isinstance(getattr(prior_dist, '_sd_value'), Tensor)):
raise TypeError("The input form of `weight_prior_fn` is incorrect")
try:
self.weight_posterior = weight_posterior_fn(shape=self.shape, name='bnn_weight')
except TypeError:
raise TypeError('The input form of `weight_posterior_fn` is incorrect')
for posterior_name, _ in self.weight_posterior.name_cells().items():
if posterior_name != 'normal':
raise TypeError("The type of distribution of `weight_posterior_fn` should be `normal`")
self.weight_prior = check_prior(weight_prior_fn, "weight_prior_fn")
self.weight_posterior = check_posterior(weight_posterior_fn, shape=self.shape, param_name='bnn_weight',
arg_name="weight_posterior_fn")
if self.has_bias:
self.bias.requires_grad = False
if isinstance(bias_prior_fn, Cell):
self.bias_prior = bias_prior_fn
else:
self.bias_prior = bias_prior_fn()
for prior_name, prior_dist in self.bias_prior.name_cells().items():
if prior_name != 'normal':
raise TypeError("The type of distribution of `bias_prior_fn` should be `normal`")
if not (isinstance(getattr(prior_dist, '_mean_value'), Tensor) and
isinstance(getattr(prior_dist, '_sd_value'), Tensor)):
raise TypeError("The input form of `bias_prior_fn` is incorrect")
try:
self.bias_posterior = bias_posterior_fn(shape=[self.out_channels], name='bnn_bias')
except TypeError:
raise TypeError('The type of `bias_posterior_fn` should be `NormalPosterior`')
for posterior_name, _ in self.bias_posterior.name_cells().items():
if posterior_name != 'normal':
raise TypeError("The type of distribution of `bias_posterior_fn` should be `normal`")
self.bias_prior = check_prior(bias_prior_fn, "bias_prior_fn")
self.bias_posterior = check_posterior(bias_posterior_fn, shape=[self.out_channels], param_name='bnn_bias',
arg_name="bias_posterior_fn")
# mindspore operations
self.bias_add = P.BiasAdd()
@@ -135,23 +102,23 @@ class _ConvVariational(_Conv):
self.sum = P.ReduceSum()
def construct(self, inputs):
outputs = self._apply_variational_weight(inputs)
outputs = self.apply_variational_weight(inputs)
if self.has_bias:
outputs = self._apply_variational_bias(outputs)
outputs = self.apply_variational_bias(outputs)
return outputs
def extend_repr(self):
s = 'in_channels={}, out_channels={}, kernel_size={}, stride={}, pad_mode={}, ' \
'padding={}, dilation={}, group={}, weight_mean={}, weight_std={}, has_bias={}'\
'padding={}, dilation={}, group={}, weight_mean={}, weight_std={}, has_bias={}' \
.format(self.in_channels, self.out_channels, self.kernel_size, self.stride, self.pad_mode, self.padding,
self.dilation, self.group, self.weight_posterior.mean, self.weight_posterior.untransformed_std,
self.has_bias)
if self.has_bias:
s += ', bias_mean={}, bias_std={}'\
s += ', bias_mean={}, bias_std={}' \
.format(self.bias_posterior.mean, self.bias_posterior.untransformed_std)
return s
def _apply_variational_bias(self, inputs):
def apply_variational_bias(self, inputs):
bias_posterior_tensor = self.bias_posterior("sample")
return self.bias_add(inputs, bias_posterior_tensor)
@@ -230,7 +197,7 @@ class ConvReparam(_ConvVariational):
normal distribution). The current version only supports normal distribution.
weight_posterior_fn: The posterior distribution for sampling weight.
It must be a function handle which returns a mindspore
distribution instance. Default: lambda name, shape: NormalPosterior(name=name, shape=shape).
distribution instance. Default: normal_post_fn.
The current version only supports normal distribution.
bias_prior_fn: The prior distribution for bias vector. It must return
a mindspore distribution. Default: NormalPrior(which creates an
@@ -238,7 +205,7 @@ class ConvReparam(_ConvVariational):
only supports normal distribution.
bias_posterior_fn: The posterior distribution for sampling bias vector.
It must be a function handle which returns a mindspore
distribution instance. Default: lambda name, shape: NormalPosterior(name=name, shape=shape).
distribution instance. Default: normal_post_fn.
The current version only supports normal distribution.
Inputs:
@@ -270,9 +237,9 @@ class ConvReparam(_ConvVariational):
group=1,
has_bias=False,
weight_prior_fn=NormalPrior,
weight_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape),
weight_posterior_fn=normal_post_fn,
bias_prior_fn=NormalPrior,
bias_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape)):
bias_posterior_fn=normal_post_fn):
super(ConvReparam, self).__init__(
in_channels,
out_channels,
@@ -289,7 +256,7 @@ class ConvReparam(_ConvVariational):
bias_posterior_fn=bias_posterior_fn
)
def _apply_variational_weight(self, inputs):
def apply_variational_weight(self, inputs):
weight_posterior_tensor = self.weight_posterior("sample")
outputs = self.conv2d(inputs, weight_posterior_tensor)
return outputs

+ 31
- 59
mindspore/nn/probability/bnn_layers/dense_variational.py View File

@@ -14,12 +14,12 @@
# ============================================================================
"""dense_variational"""
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindspore._checkparam import Validator
from ...cell import Cell
from ...layer.activation import get_activation
from ..distribution.normal import Normal
from .layer_distribution import NormalPrior, NormalPosterior
from .layer_distribution import NormalPrior, normal_post_fn
from ._util import check_prior, check_posterior

__all__ = ['DenseReparam', 'DenseLocalReparam']

@@ -36,52 +36,22 @@ class _DenseVariational(Cell):
activation=None,
has_bias=True,
weight_prior_fn=NormalPrior,
weight_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape),
weight_posterior_fn=normal_post_fn,
bias_prior_fn=NormalPrior,
bias_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape)):
bias_posterior_fn=normal_post_fn):
super(_DenseVariational, self).__init__()
self.in_channels = Validator.check_positive_int(in_channels)
self.out_channels = Validator.check_positive_int(out_channels)
self.has_bias = Validator.check_bool(has_bias)

if isinstance(weight_prior_fn, Cell):
self.weight_prior = weight_prior_fn
else:
self.weight_prior = weight_prior_fn()
for prior_name, prior_dist in self.weight_prior.name_cells().items():
if prior_name != 'normal':
raise TypeError("The type of distribution of `weight_prior_fn` should be `normal`")
if not (isinstance(getattr(prior_dist, '_mean_value'), Tensor) and
isinstance(getattr(prior_dist, '_sd_value'), Tensor)):
raise TypeError("The input form of `weight_prior_fn` is incorrect")

try:
self.weight_posterior = weight_posterior_fn(shape=[self.out_channels, self.in_channels], name='bnn_weight')
except TypeError:
raise TypeError('The type of `weight_posterior_fn` should be `NormalPosterior`')
for posterior_name, _ in self.weight_posterior.name_cells().items():
if posterior_name != 'normal':
raise TypeError("The type of distribution of `weight_posterior_fn` should be `normal`")
self.weight_prior = check_prior(weight_prior_fn, "weight_prior_fn")
self.weight_posterior = check_posterior(weight_posterior_fn, shape=[self.out_channels, self.in_channels],
param_name='bnn_weight', arg_name="weight_posterior_fn")

if self.has_bias:
if isinstance(bias_prior_fn, Cell):
self.bias_prior = bias_prior_fn
else:
self.bias_prior = bias_prior_fn()
for prior_name, prior_dist in self.bias_prior.name_cells().items():
if prior_name != 'normal':
raise TypeError("The type of distribution of `bias_prior_fn` should be `normal`")
if not (isinstance(getattr(prior_dist, '_mean_value'), Tensor) and
isinstance(getattr(prior_dist, '_sd_value'), Tensor)):
raise TypeError("The input form of `bias_prior_fn` is incorrect")

try:
self.bias_posterior = bias_posterior_fn(shape=[self.out_channels], name='bnn_bias')
except TypeError:
raise TypeError('The type of `bias_posterior_fn` should be `NormalPosterior`')
for posterior_name, _ in self.bias_posterior.name_cells().items():
if posterior_name != 'normal':
raise TypeError("The type of distribution of `bias_posterior_fn` should be `normal`")
self.bias_prior = check_prior(bias_prior_fn, "bias_prior_fn")
self.bias_posterior = check_posterior(bias_posterior_fn, shape=[self.out_channels], param_name='bnn_bias',
arg_name="bias_posterior_fn")

self.activation = activation
if not self.activation:
@@ -100,9 +70,9 @@ class _DenseVariational(Cell):
self.sum = P.ReduceSum()

def construct(self, x):
outputs = self._apply_variational_weight(x)
outputs = self.apply_variational_weight(x)
if self.has_bias:
outputs = self._apply_variational_bias(outputs)
outputs = self.apply_variational_bias(outputs)
if self.activation_flag:
outputs = self.activation(outputs)
return outputs
@@ -118,7 +88,7 @@ class _DenseVariational(Cell):
s += ', activation={}'.format(self.activation)
return s

def _apply_variational_bias(self, inputs):
def apply_variational_bias(self, inputs):
bias_posterior_tensor = self.bias_posterior("sample")
return self.bias_add(inputs, bias_posterior_tensor)

@@ -162,16 +132,17 @@ class DenseReparam(_DenseVariational):
in_channels (int): The number of input channel.
out_channels (int): The number of output channel .
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
activation (str, Cell): A regularization function applied to the output of the layer. The type of `activation`
can be a string (eg. 'relu') or a Cell (eg. nn.ReLU()). Note that if the type of activation is Cell, it must
be instantiated beforehand. Default: None.
activation (str, Cell): A regularization function applied to the output of the layer.
The type of `activation` can be a string (eg. 'relu') or a Cell (eg. nn.ReLU()).
Note that if the type of activation is Cell, it must be instantiated beforehand.
Default: None.
weight_prior_fn: The prior distribution for weight.
It must return a mindspore distribution instance.
Default: NormalPrior. (which creates an instance of standard
normal distribution). The current version only supports normal distribution.
weight_posterior_fn: The posterior distribution for sampling weight.
It must be a function handle which returns a mindspore
distribution instance. Default: lambda name, shape: NormalPosterior(name=name, shape=shape).
distribution instance. Default: normal_post_fn.
The current version only supports normal distribution.
bias_prior_fn: The prior distribution for bias vector. It must return
a mindspore distribution. Default: NormalPrior(which creates an
@@ -179,7 +150,7 @@ class DenseReparam(_DenseVariational):
only supports normal distribution.
bias_posterior_fn: The posterior distribution for sampling bias vector.
It must be a function handle which returns a mindspore
distribution instance. Default: lambda name, shape: NormalPosterior(name=name, shape=shape).
distribution instance. Default: normal_post_fn.
The current version only supports normal distribution.

Inputs:
@@ -206,9 +177,9 @@ class DenseReparam(_DenseVariational):
activation=None,
has_bias=True,
weight_prior_fn=NormalPrior,
weight_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape),
weight_posterior_fn=normal_post_fn,
bias_prior_fn=NormalPrior,
bias_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape)):
bias_posterior_fn=normal_post_fn):
super(DenseReparam, self).__init__(
in_channels,
out_channels,
@@ -220,7 +191,7 @@ class DenseReparam(_DenseVariational):
bias_posterior_fn=bias_posterior_fn
)

def _apply_variational_weight(self, inputs):
def apply_variational_weight(self, inputs):
weight_posterior_tensor = self.weight_posterior("sample")
outputs = self.matmul(inputs, weight_posterior_tensor)
return outputs
@@ -250,16 +221,17 @@ class DenseLocalReparam(_DenseVariational):
in_channels (int): The number of input channel.
out_channels (int): The number of output channel .
has_bias (bool): Specifies whether the layer uses a bias vector. Default: False.
activation (str, Cell): A regularization function applied to the output of the layer. The type of `activation`
can be a string (eg. 'relu') or a Cell (eg. nn.ReLU()). Note that if the type of activation is Cell, it must
be instantiated beforehand. Default: None.
activation (str, Cell): A regularization function applied to the output of the layer.
The type of `activation` can be a string (eg. 'relu') or a Cell (eg. nn.ReLU()).
Note that if the type of activation is Cell, it must be instantiated beforehand.
Default: None.
weight_prior_fn: The prior distribution for weight.
It must return a mindspore distribution instance.
Default: NormalPrior. (which creates an instance of standard
normal distribution). The current version only supports normal distribution.
weight_posterior_fn: The posterior distribution for sampling weight.
It must be a function handle which returns a mindspore
distribution instance. Default: lambda name, shape: NormalPosterior(name=name, shape=shape).
distribution instance. Default: normal_post_fn.
The current version only supports normal distribution.
bias_prior_fn: The prior distribution for bias vector. It must return
a mindspore distribution. Default: NormalPrior(which creates an
@@ -267,7 +239,7 @@ class DenseLocalReparam(_DenseVariational):
only supports normal distribution.
bias_posterior_fn: The posterior distribution for sampling bias vector.
It must be a function handle which returns a mindspore
distribution instance. Default: lambda name, shape: NormalPosterior(name=name, shape=shape).
distribution instance. Default: normal_post_fn.
The current version only supports normal distribution.

Inputs:
@@ -294,9 +266,9 @@ class DenseLocalReparam(_DenseVariational):
activation=None,
has_bias=True,
weight_prior_fn=NormalPrior,
weight_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape),
weight_posterior_fn=normal_post_fn,
bias_prior_fn=NormalPrior,
bias_posterior_fn=lambda name, shape: NormalPosterior(name=name, shape=shape)):
bias_posterior_fn=normal_post_fn):
super(DenseLocalReparam, self).__init__(
in_channels,
out_channels,
@@ -311,7 +283,7 @@ class DenseLocalReparam(_DenseVariational):
self.square = P.Square()
self.normal = Normal()

def _apply_variational_weight(self, inputs):
def apply_variational_weight(self, inputs):
mean = self.matmul(inputs, self.weight_posterior("mean"))
std = self.sqrt(self.matmul(self.square(inputs), self.square(self.weight_posterior("sd"))))
weight_posterior_affine_tensor = self.normal("sample", mean=mean, sd=std)


+ 5
- 0
mindspore/nn/probability/bnn_layers/layer_distribution.py View File

@@ -115,3 +115,8 @@ class NormalPosterior(Cell):
def construct(self, *inputs):
std = self.std_trans(self.untransformed_std)
return self.normal(*inputs, mean=self.mean, sd=std)
def normal_post_fn(name, shape):
    """Build and return a normal posterior distribution for a parameter."""
    posterior = NormalPosterior(shape=shape, name=name)
    return posterior

+ 3
- 2
mindspore/nn/probability/distribution/_utils/__init__.py View File

@@ -15,8 +15,9 @@
"""
Distribution operation utility functions.
"""
from .utils import *
from .custom_ops import *
from .custom_ops import exp_generic, log_generic, broadcast_to
from .utils import cast_to_tensor, check_greater_equal_zero, check_greater_zero
from .utils import check_greater, check_prob, CheckTensor, CheckTuple, set_param_type

__all__ = [
'cast_to_tensor',


+ 2
- 0
mindspore/nn/probability/distribution/_utils/custom_ops.py View File

@@ -17,6 +17,7 @@ import numpy as np
from mindspore.ops import operations as P
from mindspore.common import dtype as mstype


def exp_generic(input_x):
"""
Log op on Ascend doesn't support int types.
@@ -66,6 +67,7 @@ def log1p_generic(x):
"""
return log_generic(x + 1.0)


def broadcast_to(x, target):
"""
Broadcast x to the shape of target.


+ 12
- 2
mindspore/nn/probability/distribution/_utils/utils.py View File

@@ -24,6 +24,7 @@ from mindspore.ops import operations as P
from mindspore.ops.primitive import constexpr, PrimitiveWithInfer, prim_attr_register
import mindspore.nn as nn


def cast_to_tensor(t, hint_type=mstype.float32):
"""
Cast an user input value into a Tensor of dtype.
@@ -51,6 +52,7 @@ def cast_to_tensor(t, hint_type=mstype.float32):
raise TypeError(
f"Unable to convert input of type {invalid_type} to a Tensor of type {hint_type}")


def cast_type_for_device(dtype):
"""
use the alternative dtype supported by the device.
@@ -158,6 +160,7 @@ def check_prob(p):
if not comp.all():
raise ValueError('Probabilities should be less than one')


def check_sum_equal_one(probs):
"""
Used in categorical distribution. check if probabilities of each category sum to 1.
@@ -176,6 +179,7 @@ def check_sum_equal_one(probs):
if not comp:
raise ValueError('Probabilities for each category should sum to one for Categorical distribution.')


def check_rank(probs):
"""
Used in categorical distribution. check Rank >=1.
@@ -189,6 +193,7 @@ def check_rank(probs):
if probs.asnumpy().ndim == 0:
raise ValueError('probs for Categorical distribution must have rank >= 1.')


def logits_to_probs(logits, is_binary=False):
"""
converts logits into probabilities.
@@ -204,8 +209,6 @@ def logits_to_probs(logits, is_binary=False):
def clamp_probs(probs):
"""
clamp probs boundary
Args:
probs (Tensor)
"""
eps = P.Eps()(probs)
return C.clip_by_value(probs, eps, 1-eps)
@@ -229,29 +232,35 @@ def raise_none_error(name):
raise TypeError(f"the type {name} should be subclass of Tensor."
f" It should not be None since it is not specified during initialization.")


@constexpr
def raise_probs_logits_error():
raise TypeError("Either 'probs' or 'logits' must be specified, but not both.")


@constexpr
def raise_broadcast_error(shape_a, shape_b):
raise ValueError(f"Shape {shape_a} and {shape_b} is not broadcastable.")


@constexpr
def raise_not_impl_error(name):
raise ValueError(
f"{name} function should be implemented for non-linear transformation")


@constexpr
def raise_not_implemented_util(func_name, obj, *args, **kwargs):
raise NotImplementedError(
f"{func_name} is not implemented for {obj} distribution.")


@constexpr
def raise_type_error(name, cur_type, required_type):
raise TypeError(
f"For {name} , the type should be or be subclass of {required_type}, but got {cur_type}")


@constexpr
def raise_not_defined(func_name, obj, *args, **kwargs):
raise ValueError(
@@ -320,6 +329,7 @@ class CheckTensor(PrimitiveWithInfer):
return x
raise TypeError(f"For {name}, input type should be a Tensor or Parameter.")


def set_param_type(args, hint_type):
"""
Find the common type among arguments.


+ 2
- 1
mindspore/nn/probability/distribution/cauchy.py View File

@@ -60,7 +60,8 @@ class Cauchy(Distribution):
>>> loc_b = Tensor([1.0], dtype=mindspore.float32)
>>> scale_b = Tensor([1.0, 1.5, 2.0], dtype=mindspore.float32)
>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`,
>>> # have the same arguments as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # loc (Tensor): the location of the distribution. Default: self.loc.


+ 2
- 1
mindspore/nn/probability/distribution/gamma.py View File

@@ -63,7 +63,8 @@ class Gamma(Distribution):
>>> rate_b = Tensor([1.0, 1.5, 2.0], dtype=mindspore.float32)
>>>
>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`,
>>> # have the same arguments as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # concentration (Tensor): the concentration of the distribution. Default: self._concentration.


+ 3
- 2
mindspore/nn/probability/distribution/geometric.py View File

@@ -60,7 +60,8 @@ class Geometric(Distribution):
>>> probs_b = Tensor([0.2, 0.5, 0.4], dtype=mindspore.float32)
>>>
>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`,
>>> # have the same arguments as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # probs1 (Tensor): the probability of success of a Bernoulli trial. Default: self.probs.
@@ -96,7 +97,7 @@ class Geometric(Distribution):
>>> # Args:
>>> # dist (str): the name of the distribution. Only 'Geometric' is supported.
>>> # probs1_b (Tensor): the probability of success of a Bernoulli trial of distribution b.
>>> # probs1_a (Tensor): the probability of success of a Bernoulli trial of distribution a. Default: self.probs.
>>> # probs1_a (Tensor): the probability of success of a Bernoulli trial of distribution a.
>>> # Examples of `kl_loss`. `cross_entropy` is similar.
>>> ans = g1.kl_loss('Geometric', probs_b)
>>> print(ans.shape)


+ 1
- 0
mindspore/nn/probability/distribution/gumbel.py View File

@@ -24,6 +24,7 @@ from .transformed_distribution import TransformedDistribution
from ._utils.utils import check_distribution_name
from ._utils.custom_ops import exp_generic, log_generic


class Gumbel(TransformedDistribution):
"""
Gumbel distribution.


+ 1
- 0
mindspore/nn/probability/distribution/log_normal.py View File

@@ -21,6 +21,7 @@ import mindspore.nn.probability.distribution as msd
from ._utils.utils import check_distribution_name
from ._utils.custom_ops import exp_generic, log_generic


class LogNormal(msd.TransformedDistribution):
"""
LogNormal distribution.


+ 2
- 1
mindspore/nn/probability/distribution/logistic.py View File

@@ -61,7 +61,8 @@ class Logistic(Distribution):
>>> scale_b = Tensor([1.0, 1.5, 2.0], dtype=mindspore.float32)
>>>
>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`,
>>> # have the same arguments as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # loc (Tensor): the location of the distribution. Default: self.loc.


+ 2
- 1
mindspore/nn/probability/distribution/normal.py View File

@@ -61,7 +61,8 @@ class Normal(Distribution):
>>> sd_b = Tensor([1.0, 1.5, 2.0], dtype=mindspore.float32)

>>> # Private interfaces of probability functions corresponding to public interfaces, including
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
>>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`,
>>> # have the same arguments as follows.
>>> # Args:
>>> # value (Tensor): the value to be evaluated.
>>> # mean (Tensor): the mean of the distribution. Default: self._mean_value.


+ 1
- 1
mindspore/nn/probability/dpn/__init__.py View File

@@ -16,7 +16,7 @@
Deep probability network such as BNN and VAE network.
"""

from .vae import *
from .vae import VAE, ConditionalVAE

__all__ = []
__all__.extend(vae.__all__)

+ 1
- 1
mindspore/nn/probability/infer/__init__.py View File

@@ -16,7 +16,7 @@
Inference algorithms in Probabilistic Programming.
"""

from .variational import *
from .variational import SVI, ELBO

__all__ = []
__all__.extend(variational.__all__)

+ 4
- 2
mindspore/nn/probability/transforms/transform_bnn.py View File

@@ -83,8 +83,10 @@ class TransformToBNN:

def transform_to_bnn_model(self,
get_dense_args=lambda dp: {"in_channels": dp.in_channels, "has_bias": dp.has_bias,
"out_channels": dp.out_channels, "activation": dp.activation},
get_conv_args=lambda dp: {"in_channels": dp.in_channels, "out_channels": dp.out_channels,
"out_channels": dp.out_channels,
"activation": dp.activation},
get_conv_args=lambda dp: {"in_channels": dp.in_channels,
"out_channels": dp.out_channels,
"pad_mode": dp.pad_mode, "kernel_size": dp.kernel_size,
"stride": dp.stride, "has_bias": dp.has_bias,
"padding": dp.padding, "dilation": dp.dilation,


+ 2
- 2
mindspore/nn/probability/zhusuan/__init__.py View File

@@ -14,5 +14,5 @@
# ============================================================================
""" Zhusuan package: a probalistic programming library """

from .framework import *
from .variational import *
from .framework import BayesianNet
from .variational import ELBO

+ 1
- 1
mindspore/nn/probability/zhusuan/framework/__init__.py View File

@@ -15,4 +15,4 @@

""" Core functionality for Zhusuan """

from .bn import *
from .bn import BayesianNet

+ 6
- 8
mindspore/nn/probability/zhusuan/framework/bn.py View File

@@ -34,7 +34,7 @@ class BayesianNet(nn.Cell):

self.reduce_sum = P.ReduceSum(keep_dims=True)

def Normal(self,
def normal(self,
name,
observation=None,
mean=None,
@@ -45,9 +45,8 @@ class BayesianNet(nn.Cell):
reparameterize=True):
""" Normal distribution wrapper """

assert not name is None
assert not seed is None
assert not dtype is None
if not isinstance(name, str):
raise TypeError("The type of `name` should be string")

if observation is None:
if reparameterize:
@@ -63,7 +62,7 @@ class BayesianNet(nn.Cell):
'log_prob', sample, mean, std), 1)
return sample, log_prob

def Bernoulli(self,
def bernoulli(self,
name,
observation=None,
probs=None,
@@ -72,9 +71,8 @@ class BayesianNet(nn.Cell):
shape=()):
""" Bernoulli distribution wrapper """

assert not name is None
assert not seed is None
assert not dtype is None
if not isinstance(name, str):
raise TypeError("The type of `name` should be string")

if observation is None:
sample = self.bernoulli_dist('sample', shape, probs)


+ 1
- 1
mindspore/nn/probability/zhusuan/variational/__init__.py View File

@@ -15,4 +15,4 @@

""" Variational inference related codes """

from .elbo import *
from .elbo import ELBO

+ 3
- 3
tests/st/probability/zhusuan/vae/vae_mnist.py View File

@@ -67,13 +67,13 @@ class Generator(zs.BayesianNet):

z_mean = self.zeros((self.batch_size, self.z_dim))
z_std = self.ones((self.batch_size, self.z_dim))
z, log_prob_z = self.Normal('latent', observation=z, mean=z_mean, std=z_std, shape=(), reparameterize=False)
z, log_prob_z = self.normal('latent', observation=z, mean=z_mean, std=z_std, shape=(), reparameterize=False)

x_mean = self.sigmoid(self.fc3(self.act2(self.fc2(self.act1(self.fc1(z))))))
if x is None:
#x = self.bernoulli_dist('sample', (), x_mean)
x = x_mean
x, log_prob_x = self.Bernoulli('data', observation=x, shape=(), probs=x_mean)
x, log_prob_x = self.bernoulli('data', observation=x, shape=(), probs=x_mean)

return x, log_prob_x, z, log_prob_z

@@ -109,7 +109,7 @@ class Variational(zs.BayesianNet):
z_mean = self.fc3(z_logit)
z_std = self.exp(self.fc4(z_logit))
#z, log_prob_z = self.reparameterization(z_mean, z_std)
z, log_prob_z = self.Normal('latent', observation=z, mean=z_mean, std=z_std, shape=(), reparameterize=True)
z, log_prob_z = self.normal('latent', observation=z, mean=z_mean, std=z_std, shape=(), reparameterize=True)
return z, log_prob_z

def main():


Loading…
Cancel
Save