@@ -61,6 +61,12 @@ class _ConvVariational(_Conv):
            raise ValueError('Attr \'pad_mode\' of \'Conv2d\' Op passed '
                             + str(pad_mode) + ', should be one of values in \'valid\', \'same\', \'pad\'.')
        if not isinstance(stride, (int, tuple)):
            raise TypeError('The type of `stride` should be `int` or `tuple`')
        if not isinstance(dilation, (int, tuple)):
            raise TypeError('The type of `dilation` should be `int` or `tuple`')
        # convolution args
        self.in_channels = in_channels
        self.out_channels = out_channels
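The two added checks reject any `stride` or `dilation` that is not an `int` or a `tuple` before the convolution arguments are stored. A minimal standalone sketch of the same check (`_check_conv_arg` is an illustrative helper, not part of the patch):

```python
def _check_conv_arg(name, value):
    # Mirrors the added isinstance checks: only int or tuple is accepted.
    if not isinstance(value, (int, tuple)):
        raise TypeError('The type of `{}` should be `int` or `tuple`'.format(name))
    return value

print(_check_conv_arg('stride', 2))         # int is accepted
print(_check_conv_arg('dilation', (1, 1)))  # tuple is accepted
try:
    _check_conv_arg('stride', 2.0)          # any other type is rejected
except TypeError as err:
    print(err)
```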
@@ -87,13 +93,10 @@ class _ConvVariational(_Conv):
                raise TypeError('The type of `weight_prior_fn` should be `NormalPrior`')
        self.weight_prior = weight_prior_fn()
        if isinstance(weight_posterior_fn, Cell):
            if weight_posterior_fn.__class__.__name__ != 'NormalPosterior':
                raise TypeError('The type of `weight_posterior_fn` should be `NormalPosterior`')
        else:
            if weight_posterior_fn.__name__ != 'NormalPosterior':
                raise TypeError('The type of `weight_posterior_fn` should be `NormalPosterior`')
        self.weight_posterior = weight_posterior_fn(shape=self.shape, name='bnn_weight')
        try:
            self.weight_posterior = weight_posterior_fn(shape=self.shape, name='bnn_weight')
        except TypeError:
            raise TypeError('The type of `weight_posterior_fn` should be `NormalPosterior`')
        if self.has_bias:
            self.bias.requires_grad = False
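The string comparison against the class name is dropped in favour of simply calling the factory: if `weight_posterior_fn` cannot be called with `shape` and `name` keyword arguments, the resulting `TypeError` is re-raised with an explicit message, and posteriors with a compatible constructor signature are no longer rejected by name. A minimal sketch of the pattern (`DummyPosterior` and `make_posterior` are illustrative, not MindSpore APIs):

```python
class DummyPosterior:
    # Stands in for NormalPosterior: accepts the expected keyword arguments.
    def __init__(self, shape, name):
        self.shape, self.name = shape, name

def make_posterior(posterior_fn, shape, name):
    try:
        return posterior_fn(shape=shape, name=name)
    except TypeError:
        raise TypeError('The type of `weight_posterior_fn` should be `NormalPosterior`')

print(make_posterior(DummyPosterior, shape=[64, 3, 3, 3], name='bnn_weight').shape)
try:
    make_posterior(lambda: None, shape=[64, 3, 3, 3], name='bnn_weight')  # wrong signature
except TypeError as err:
    print(err)
```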
@@ -107,13 +110,10 @@ class _ConvVariational(_Conv):
                    raise TypeError('The type of `bias_prior_fn` should be `NormalPrior`')
            self.bias_prior = bias_prior_fn()
            if isinstance(bias_posterior_fn, Cell):
                if bias_posterior_fn.__class__.__name__ != 'NormalPosterior':
                    raise TypeError('The type of `bias_posterior_fn` should be `NormalPosterior`')
            else:
                if bias_posterior_fn.__name__ != 'NormalPosterior':
                    raise TypeError('The type of `bias_posterior_fn` should be `NormalPosterior`')
            self.bias_posterior = bias_posterior_fn(shape=[self.out_channels], name='bnn_bias')
            try:
                self.bias_posterior = bias_posterior_fn(shape=[self.out_channels], name='bnn_bias')
            except TypeError:
                raise TypeError('The type of `bias_posterior_fn` should be `NormalPosterior`')
        # mindspore operations
        self.bias_add = P.BiasAdd()
@@ -51,13 +51,10 @@ class _DenseVariational(Cell):
                raise TypeError('The type of `weight_prior_fn` should be `NormalPrior`')
        self.weight_prior = weight_prior_fn()
        if isinstance(weight_posterior_fn, Cell):
            if weight_posterior_fn.__class__.__name__ != 'NormalPosterior':
                raise TypeError('The type of `weight_posterior_fn` should be `NormalPosterior`')
        else:
            if weight_posterior_fn.__name__ != 'NormalPosterior':
                raise TypeError('The type of `weight_posterior_fn` should be `NormalPosterior`')
        self.weight_posterior = weight_posterior_fn(shape=[self.out_channels, self.in_channels], name='bnn_weight')
        try:
            self.weight_posterior = weight_posterior_fn(shape=[self.out_channels, self.in_channels], name='bnn_weight')
        except TypeError:
            raise TypeError('The type of `weight_posterior_fn` should be `NormalPosterior`')
        if self.has_bias:
            if isinstance(bias_prior_fn, Cell):
@@ -69,13 +66,10 @@ class _DenseVariational(Cell):
                    raise TypeError('The type of `bias_prior_fn` should be `NormalPrior`')
            self.bias_prior = bias_prior_fn()
            if isinstance(bias_posterior_fn, Cell):
                if bias_posterior_fn.__class__.__name__ != 'NormalPosterior':
                    raise TypeError('The type of `bias_posterior_fn` should be `NormalPosterior`')
            else:
                if bias_posterior_fn.__name__ != 'NormalPosterior':
                    raise TypeError('The type of `bias_posterior_fn` should be `NormalPosterior`')
            self.bias_posterior = bias_posterior_fn(shape=[self.out_channels], name='bnn_bias')
            try:
                self.bias_posterior = bias_posterior_fn(shape=[self.out_channels], name='bnn_bias')
            except TypeError:
                raise TypeError('The type of `bias_posterior_fn` should be `NormalPosterior`')
        self.activation = activation
        if isinstance(self.activation, str):
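The dense variant follows the same try/except pattern; the only difference is the shape handed to the posterior factory: `[out_channels, in_channels]` for the weight and `[out_channels]` for the bias. A quick NumPy-only illustration of the parameter shapes involved (values are arbitrary):

```python
import numpy as np

out_channels, in_channels = 16, 8
# Weight posterior parameters match the dense weight matrix ...
weight_mean = np.random.normal(0, 0.1, [out_channels, in_channels])
# ... while the bias posterior has one entry per output channel.
bias_mean = np.random.normal(0, 0.1, [out_channels])
print(weight_mean.shape, bias_mean.shape)  # (16, 8) (16,)
```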
@@ -51,15 +51,16 @@ class NormalPosterior(Cell):
    Args:
        name (str): Name prepended to trainable parameter.
        shape (list): Shape of the mean and standard deviation.
        shape (list, tuple): Shape of the mean and standard deviation.
        dtype (class `mindspore.dtype`): The argument is used to define the data type of the output tensor.
            Default: mindspore.float32.
        loc_mean ( float, array_like of floats): Mean of distribution to initialize trainable parameters. Default: 0.
        loc_std ( float, array_like of floats): Standard deviation of distribution to initialize trainable parameters.
            Default: 0.1.
        untransformed_scale_mean ( float, array_like of floats): Mean of distribution to initialize trainable
        loc_mean (int, float, array_like of floats): Mean of distribution to initialize trainable parameters.
            Default: 0.
        loc_std (int, float, array_like of floats): Standard deviation of distribution to initialize trainable
            parameters. Default: 0.1.
        untransformed_scale_mean (int, float, array_like of floats): Mean of distribution to initialize trainable
            parameters. Default: -5.
        untransformed_scale_std ( float, array_like of floats): Standard deviation of distribution to initialize
        untransformed_scale_std (int, float, array_like of floats): Standard deviation of distribution to initialize
            trainable parameters. Default: 0.1.
    Returns:
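With the updated docstring, `shape` may be a `list` or a `tuple`, and the four initialization arguments accept ints as well as floats or array-likes. A usage sketch under the assumption that the class is importable from `mindspore.nn.probability.bnn_layers` (the import path may differ between releases):

```python
from mindspore.nn.probability.bnn_layers import NormalPosterior

# A tuple shape and int initialization values are both accepted.
posterior = NormalPosterior(name='bnn_weight',
                            shape=(64, 3, 3, 3),
                            loc_mean=0,
                            loc_std=0.1,
                            untransformed_scale_mean=-5,
                            untransformed_scale_std=0.1)
# posterior.mean and posterior.untransformed_std are trainable Parameters drawn
# from N(loc_mean, loc_std) and N(untransformed_scale_mean, untransformed_scale_std).
```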
@@ -80,20 +81,25 @@ class NormalPosterior(Cell):
        if not isinstance(shape, (tuple, list)):
            raise TypeError('The type of `shape` should be `tuple` or `list`')
        if not (np.array(shape) > 0).all():
            raise ValueError('Negative dimensions are not allowed')
        try:
            mean_arr = np.random.normal(loc_mean, loc_std, shape)
        except ValueError as msg:
            raise ValueError(msg)
        except TypeError as msg:
            raise TypeError(msg)
        if not (np.array(loc_std) >= 0).all():
            raise ValueError('The value of `loc_std` < 0')
        if not (np.array(untransformed_scale_std) >= 0).all():
            raise ValueError('The value of `untransformed_scale_std` < 0')
        try:
            untransformed_scale_arr = np.random.normal(untransformed_scale_mean, untransformed_scale_std, shape)
        except ValueError as msg:
            raise ValueError(msg)
        except TypeError as msg:
            raise TypeError(msg)
        self.mean = Parameter(
            Tensor(np.random.normal(loc_mean, loc_std, shape), dtype=dtype), name=name + '_mean')
            Tensor(mean_arr, dtype=dtype), name=name + '_mean')
        self.untransformed_std = Parameter(
            Tensor(np.random.normal(untransformed_scale_mean, untransformed_scale_std, shape), dtype=dtype),
            name=name + '_untransformed_std')
            Tensor(untransformed_scale_arr, dtype=dtype), name=name + '_untransformed_std')
        self.normal = Normal()
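The constructor now wraps both `np.random.normal` calls in try/except and re-raises whatever NumPy reports, so malformed shapes or negative standard deviations fail at construction time. A standalone sketch of the NumPy behaviour being relied on (exact messages may vary between NumPy versions):

```python
import numpy as np

try:
    np.random.normal(0, 0.1, (2, -3))   # invalid shape
except ValueError as err:
    print(err)                          # e.g. "negative dimensions are not allowed"

try:
    np.random.normal(0, -0.1, (2, 3))   # negative standard deviation
except ValueError as err:
    print(err)                          # e.g. "scale < 0"
```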