From: @shallydeng
Reviewed-by: @zichun_ye, @sunnybeike
Signed-off-by:
tags/v1.1.0
@@ -164,6 +164,8 @@ class Bijector(Cell):
                 self.common_dtype = value_t.dtype
             elif value_t.dtype != self.common_dtype:
                 raise TypeError(f"{name} should have the same dtype as other arguments.")
+            # check if the parameters are casted into float-type tensors
+            validator.check_type_name("dtype", value_t.dtype, mstype.float_type, type(self).__name__)
         # check if the dtype of the input_parameter agrees with the bijector's dtype
         elif value_t.dtype != self.dtype:
             raise TypeError(f"{name} should have the same dtype as the bijector's dtype.")
@@ -61,7 +61,7 @@ class PowerTransform(Bijector):
     """
     def __init__(self,
-                 power=0,
+                 power=0.,
                  name='PowerTransform'):
         param = dict(locals())
         param['param_dict'] = {'power': power}
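The default moves from the integer `0` to the float literal `0.`, so the default parameter itself passes the float-only dtype check added above. A minimal usage sketch (import style assumed from the tests below):

    import mindspore.nn.probability.bijector as msb

    b = msb.PowerTransform(power=0.)   # float parameter: accepted
    # b = msb.PowerTransform(power=0)  # int parameter: would now raise TypeError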
@@ -159,6 +159,15 @@ def check_prob(p):
         raise ValueError('Probabilities should be less than one')

 def check_sum_equal_one(probs):
     """
     Used in categorical distribution. check if probabilities of each category sum to 1.
     """
+    if probs is None:
+        raise ValueError(f'input value cannot be None in check_sum_equal_one')
+    if isinstance(probs, Parameter):
+        if not isinstance(probs.data, Tensor):
+            return
+        probs = probs.data
     prob_sum = np.sum(probs.asnumpy(), axis=-1)
     comp = np.equal(np.ones(prob_sum.shape), prob_sum)
     if not comp.all():
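A standalone numpy sketch of what this check computes. Note that `np.equal` tests exact equality, so the probabilities must sum to exactly 1.0 in the tensor's dtype (values such as [0.25, 0.25, 0.5] do; sums that are only approximately 1 would fail as written):

    import numpy as np

    probs = np.array([[0.25, 0.25, 0.5],
                      [0.5, 0.25, 0.25]], dtype=np.float32)
    prob_sum = np.sum(probs, axis=-1)                   # sum over the last (category) axis
    comp = np.equal(np.ones(prob_sum.shape), prob_sum)  # element-wise exact comparison
    assert comp.all()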
@@ -168,6 +177,12 @@ def check_rank(probs):
     """
     Used in categorical distribution. check Rank >=1.
     """
+    if probs is None:
+        raise ValueError(f'input value cannot be None in check_rank')
+    if isinstance(probs, Parameter):
+        if not isinstance(probs.data, Tensor):
+            return
+        probs = probs.data
     if probs.asnumpy().ndim == 0:
         raise ValueError('probs for Categorical distribution must have rank >= 1.')
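The rank guard in isolation (numpy-only sketch): a scalar `probs` is rejected, anything with at least one axis passes.

    import numpy as np

    def check_rank_np(probs):
        # probs.asnumpy().ndim == 0 in the diff corresponds to a rank-0 (scalar) array
        if probs.ndim == 0:
            raise ValueError('probs for Categorical distribution must have rank >= 1.')

    check_rank_np(np.array([0.3, 0.7]))  # rank 1: fine
    # check_rank_np(np.array(0.5))       # rank 0: raises ValueError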
@@ -44,7 +44,7 @@ class Bernoulli(Distribution):
     >>> # The following creates two independent Bernoulli distributions.
     >>> b = msd.Bernoulli([0.5, 0.5], dtype=mstype.int32)
     >>>
-    >>> # A Bernoulli distribution can be initilized without arguments.
+    >>> # A Bernoulli distribution can be initialized without arguments.
     >>> # In this case, `probs` must be passed in through arguments during function calls.
     >>> b = msd.Bernoulli(dtype=mstype.int32)
     >>>
@@ -106,7 +106,6 @@ class Bernoulli(Distribution):
     ... ans = self.b1.sample((2,3))
     ... ans = self.b1.sample((2,3), probs_b)
     ... ans = self.b2.sample((2,3), probs_a)
-    ...
     """
     def __init__(self,
@@ -99,7 +99,6 @@ class Categorical(Distribution):
     ... ans = self.ca.sample((2,3))
     ... ans = self.ca.sample((2,3), probs_b)
     ... ans = self.ca1.sample((2,3), probs_a)
-    ...
     """
     def __init__(self,
@@ -48,70 +48,70 @@ class Cauchy(Distribution):
     >>> # The following creates two independent Cauchy distributions.
     >>> cauchy = msd.Cauchy([3.0, 3.0], [4.0, 4.0], dtype=mstype.float32)
     >>>
-    >>> # A Cauchy distribution can be initilize without arguments.
+    >>> # A Cauchy distribution can be initialized without arguments.
     >>> # In this case, 'loc' and `scale` must be passed in through arguments.
     >>> cauchy = msd.Cauchy(dtype=mstype.float32)
     >>>
     >>> # To use a Cauchy distribution in a network.
     >>> class net(Cell):
-    >>> def __init__(self):
-    >>> super(net, self).__init__():
-    >>> self.cau1 = msd.Cauchy(0.0, 1.0, dtype=mstype.float32)
-    >>> self.cau2 = msd.Cauchy(dtype=mstype.float32)
-    >>>
-    >>> # The following calls are valid in construct.
-    >>> def construct(self, value, loc_b, scale_b, loc_a, scale_a):
-    >>>
-    >>> # Private interfaces of probability functions corresponding to public interfaces, including
-    >>> # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
-    >>> # Args:
-    >>> # value (Tensor): the value to be evaluated.
-    >>> # loc (Tensor): the location of the distribution. Default: self.loc.
-    >>> # scale (Tensor): the scale of the distribution. Default: self.scale.
-    >>>
-    >>> # Examples of `prob`.
-    >>> # Similar calls can be made to other probability functions
-    >>> # by replacing 'prob' by the name of the function
-    >>> ans = self.cau1.prob(value)
-    >>> # Evaluate with respect to distribution b.
-    >>> ans = self.cau1.prob(value, loc_b, scale_b)
-    >>> # `loc` and `scale` must be passed in during function calls
-    >>> ans = self.cau2.prob(value, loc_a, scale_a)
-    >>>
-    >>> # Functions `mode` and `entropy` have the same arguments.
-    >>> # Args:
-    >>> # loc (Tensor): the location of the distribution. Default: self.loc.
-    >>> # scale (Tensor): the scale of the distribution. Default: self.scale.
-    >>>
-    >>> # Example of `mode`.
-    >>> ans = self.cau1.mode() # return 0.0
-    >>> ans = self.cau1.mode(loc_b, scale_b) # return loc_b
-    >>> # `loc` and `scale` must be passed in during function calls.
-    >>> ans = self.cau2.mode(loc_a, scale_a)
-    >>>
-    >>> # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
-    >>> # Args:
-    >>> # dist (str): the type of the distributions. Only "Cauchy" is supported.
-    >>> # loc_b (Tensor): the loc of distribution b.
-    >>> # scale_b (Tensor): the scale distribution b.
-    >>> # loc (Tensor): the loc of distribution a. Default: self.loc.
-    >>> # scale (Tensor): the scale distribution a. Default: self.scale.
-    >>>
-    >>> # Examples of `kl_loss`. `cross_entropy` is similar.
-    >>> ans = self.cau1.kl_loss('Cauchy', loc_b, scale_b)
-    >>> ans = self.cau1.kl_loss('Cauchy', loc_b, scale_b, loc_a, scale_a)
-    >>> # Additional `loc` and `scale` must be passed in.
-    >>> ans = self.cau2.kl_loss('Cauchy', loc_b, scale_b, loc_a, scale_a)
-    >>>
-    >>> # Examples of `sample`.
-    >>> # Args:
-    >>> # shape (tuple): the shape of the sample. Default: ()
-    >>> # loc (Tensor): the location of the distribution. Default: self.loc.
-    >>> # scale (Tensor): the scale of the distribution. Default: self.scale.
-    >>> ans = self.cau1.sample()
-    >>> ans = self.cau1.sample((2,3))
-    >>> ans = self.cau1.sample((2,3), loc_b, s_b)
-    >>> ans = self.cau2.sample((2,3), loc_a, s_a)
+    ... def __init__(self):
+    ... super(net, self).__init__():
+    ... self.cau1 = msd.Cauchy(0.0, 1.0, dtype=mstype.float32)
+    ... self.cau2 = msd.Cauchy(dtype=mstype.float32)
+    ...
+    ... # The following calls are valid in construct.
+    ... def construct(self, value, loc_b, scale_b, loc_a, scale_a):
+    ...
+    ... # Private interfaces of probability functions corresponding to public interfaces, including
+    ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
+    ... # Args:
+    ... # value (Tensor): the value to be evaluated.
+    ... # loc (Tensor): the location of the distribution. Default: self.loc.
+    ... # scale (Tensor): the scale of the distribution. Default: self.scale.
+    ...
+    ... # Examples of `prob`.
+    ... # Similar calls can be made to other probability functions
+    ... # by replacing 'prob' by the name of the function
+    ... ans = self.cau1.prob(value)
+    ... # Evaluate with respect to distribution b.
+    ... ans = self.cau1.prob(value, loc_b, scale_b)
+    ... # `loc` and `scale` must be passed in during function calls
+    ... ans = self.cau2.prob(value, loc_a, scale_a)
+    ...
+    ... # Functions `mode` and `entropy` have the same arguments.
+    ... # Args:
+    ... # loc (Tensor): the location of the distribution. Default: self.loc.
+    ... # scale (Tensor): the scale of the distribution. Default: self.scale.
+    ...
+    ... # Example of `mode`.
+    ... ans = self.cau1.mode() # return 0.0
+    ... ans = self.cau1.mode(loc_b, scale_b) # return loc_b
+    ... # `loc` and `scale` must be passed in during function calls.
+    ... ans = self.cau2.mode(loc_a, scale_a)
+    ...
+    ... # Interfaces of 'kl_loss' and 'cross_entropy' are the same:
+    ... # Args:
+    ... # dist (str): the type of the distributions. Only "Cauchy" is supported.
+    ... # loc_b (Tensor): the loc of distribution b.
+    ... # scale_b (Tensor): the scale distribution b.
+    ... # loc (Tensor): the loc of distribution a. Default: self.loc.
+    ... # scale (Tensor): the scale distribution a. Default: self.scale.
+    ...
+    ... # Examples of `kl_loss`. `cross_entropy` is similar.
+    ... ans = self.cau1.kl_loss('Cauchy', loc_b, scale_b)
+    ... ans = self.cau1.kl_loss('Cauchy', loc_b, scale_b, loc_a, scale_a)
+    ... # Additional `loc` and `scale` must be passed in.
+    ... ans = self.cau2.kl_loss('Cauchy', loc_b, scale_b, loc_a, scale_a)
+    ...
+    ... # Examples of `sample`.
+    ... # Args:
+    ... # shape (tuple): the shape of the sample. Default: ()
+    ... # loc (Tensor): the location of the distribution. Default: self.loc.
+    ... # scale (Tensor): the scale of the distribution. Default: self.scale.
+    ... ans = self.cau1.sample()
+    ... ans = self.cau1.sample((2,3))
+    ... ans = self.cau1.sample((2,3), loc_b, s_b)
+    ... ans = self.cau2.sample((2,3), loc_a, s_a)
     """
     def __init__(self,
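Outside a network, the same distribution can be exercised eagerly; a minimal sketch under the imports these docstrings assume (`msd` for mindspore.nn.probability.distribution, `mstype` for mindspore.dtype; the sample shape here is sample shape plus batch shape, as described in the docstring):

    import mindspore.nn.probability.distribution as msd
    from mindspore import dtype as mstype

    cauchy = msd.Cauchy([3.0, 3.0], [4.0, 4.0], dtype=mstype.float32)
    ans = cauchy.mode()          # returns loc: [3.0, 3.0]
    ans = cauchy.sample((2, 3))  # shape (2, 3, 2)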
@@ -46,7 +46,7 @@ class Exponential(Distribution):
     >>> # The following creates two independent Exponential distributions.
     >>> e = msd.Exponential([0.5, 0.5], dtype=mstype.float32)
     >>>
-    >>> # An Exponential distribution can be initilized without arguments.
+    >>> # An Exponential distribution can be initialized without arguments.
     >>> # In this case, `rate` must be passed in through `args` during function calls.
     >>> e = msd.Exponential(dtype=mstype.float32)
     >>>
@@ -108,7 +108,6 @@ class Exponential(Distribution):
     ... ans = self.e1.sample((2,3))
     ... ans = self.e1.sample((2,3), rate_b)
     ... ans = self.e2.sample((2,3), rate_a)
-    ...
     """
     def __init__(self,
@@ -187,7 +186,7 @@ class Exponential(Distribution):
     def _sd(self, rate=None):
         r"""
         .. math::
-            sd(EXP) = \frac{1.0}{\lambda}.
+            SD(EXP) = \frac{1.0}{\lambda}.
         """
         rate = self._check_param_type(rate)
         return 1.0 / rate
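For Exp(λ), the variance is 1/λ², so the standard deviation is 1/λ; e.g. λ = 0.5 gives SD = 2. A quick Monte Carlo sanity check (plain numpy; note numpy parameterizes the exponential by scale = 1/rate):

    import numpy as np

    rate = 0.5
    rng = np.random.default_rng(0)
    samples = rng.exponential(scale=1.0 / rate, size=1_000_000)
    print(samples.std())  # ~2.0 == 1.0 / rate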
@@ -47,7 +47,7 @@ class Geometric(Distribution):
     >>> # The following creates two independent Geometric distributions.
     >>> n = msd.Geometric([0.5, 0.5], dtype=mstype.int32)
     >>>
-    >>> # A Geometric distribution can be initilized without arguments.
+    >>> # A Geometric distribution can be initialized without arguments.
     >>> # In this case, `probs` must be passed in through arguments during function calls.
     >>> n = msd.Geometric(dtype=mstype.int32)
     >>>
@@ -109,7 +109,6 @@ class Geometric(Distribution):
     ... ans = self.g1.sample((2,3))
     ... ans = self.g1.sample((2,3), probs_b)
     ... ans = self.g2.sample((2,3), probs_a)
-    ...
     """
     def __init__(self,
@@ -91,7 +91,6 @@ class Gumbel(TransformedDistribution):
     ...
     ... ans = self.g1.sample()
     ... ans = self.g1.sample((2,3))
-    ...
     """
     def __init__(self,
@@ -47,7 +47,7 @@ class LogNormal(msd.TransformedDistribution):
     >>> # The following creates two independent LogNormal distributions.
     >>> n = msd.LogNormal([3.0, 3.0], [4.0, 4.0], dtype=mstype.float32)
     >>>
-    >>> # A LogNormal distribution can be initilize without arguments.
+    >>> # A LogNormal distribution can be initialized without arguments.
     >>> # In this case, `loc` and `scale` must be passed in during function calls.
     >>> n = msd.LogNormal(dtype=mstype.float32)
     >>>
@@ -122,7 +122,6 @@ class LogNormal(msd.TransformedDistribution):
     ... ans = self.n1.sample((2,3))
     ... ans = self.n1.sample((2,3), loc_b, scale_b)
     ... ans = self.n2.sample((2,3), loc_a, scale_a)
-    ...
     """
     def __init__(self,
@@ -47,7 +47,7 @@ class Logistic(Distribution):
     >>> # The following creates two independent Logistic distributions.
     >>> n = msd.Logistic([3.0, 3.0], [4.0, 4.0], dtype=mstype.float32)
     >>>
-    >>> # A Logistic distribution can be initilize without arguments.
+    >>> # A Logistic distribution can be initialized without arguments.
     >>> # In this case, `loc` and `scale` must be passed in through arguments.
     >>> n = msd.Logistic(dtype=mstype.float32)
     >>>
@@ -97,7 +97,6 @@ class Logistic(Distribution):
     ... ans = self.l1.sample((2,3))
     ... ans = self.l1.sample((2,3), loc_b, scale_b)
     ... ans = self.l2.sample((2,3), loc_a, scale_a)
-    ...
     """
     def __init__(self,
@@ -47,7 +47,7 @@ class Normal(Distribution):
     >>> # The following creates two independent Normal distributions.
     >>> n = msd.Normal([3.0, 3.0], [4.0, 4.0], dtype=mstype.float32)
     >>>
-    >>> # A Normal distribution can be initilize without arguments.
+    >>> # A Normal distribution can be initialized without arguments.
     >>> # In this case, `mean` and `sd` must be passed in through arguments.
     >>> n = msd.Normal(dtype=mstype.float32)
     >>>
@@ -55,7 +55,7 @@ class Normal(Distribution):
     >>> class net(Cell):
     ... def __init__(self):
     ... super(net, self).__init__():
-    ... self.n1 = msd.Nomral(0.0, 1.0, dtype=mstype.float32)
+    ... self.n1 = msd.Normal(0.0, 1.0, dtype=mstype.float32)
     ... self.n2 = msd.Normal(dtype=mstype.float32)
     ...
     ... # The following calls are valid in construct.
@@ -65,14 +65,14 @@ class Normal(Distribution):
     ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments as follows.
     ... # Args:
     ... # value (Tensor): the value to be evaluated.
-    ... # mean (Tensor): the mean of distribution. Default: self._mean_value.
-    ... # sd (Tensor): the standard deviation of distribution. Default: self._sd_value.
+    ... # mean (Tensor): the mean of the distribution. Default: self._mean_value.
+    ... # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value.
     ...
     ... # Examples of `prob`.
     ... # Similar calls can be made to other probability functions
     ... # by replacing 'prob' by the name of the function
     ... ans = self.n1.prob(value)
-    ... # Evaluate with respect to distribution b.
+    ... # Evaluate with respect to the distribution b.
     ... ans = self.n1.prob(value, mean_b, sd_b)
     ... # `mean` and `sd` must be passed in during function calls
     ... ans = self.n2.prob(value, mean_a, sd_a)
@@ -80,8 +80,8 @@ class Normal(Distribution):
     ...
     ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
     ... # Args:
-    ... # mean (Tensor): the mean of distribution. Default: self._mean_value.
-    ... # sd (Tensor): the standard deviation of distribution. Default: self._sd_value.
+    ... # mean (Tensor): the mean of the distribution. Default: self._mean_value.
+    ... # sd (Tensor): the standard deviation of the distribution. Default: self._sd_value.
     ...
     ... # Example of `mean`. `sd`, `var`, and `entropy` are similar.
     ... ans = self.n1.mean() # return 0.0
@@ -94,9 +94,9 @@ class Normal(Distribution):
     ... # Args:
     ... # dist (str): the type of the distributions. Only "Normal" is supported.
     ... # mean_b (Tensor): the mean of distribution b.
-    ... # sd_b (Tensor): the standard deviation distribution b.
+    ... # sd_b (Tensor): the standard deviation of distribution b.
     ... # mean_a (Tensor): the mean of distribution a. Default: self._mean_value.
-    ... # sd_a (Tensor): the standard deviation distribution a. Default: self._sd_value.
+    ... # sd_a (Tensor): the standard deviation of distribution a. Default: self._sd_value.
     ...
     ... # Examples of `kl_loss`. `cross_entropy` is similar.
     ... ans = self.n1.kl_loss('Normal', mean_b, sd_b)
@@ -113,7 +113,6 @@ class Normal(Distribution):
     ... ans = self.n1.sample((2,3))
     ... ans = self.n1.sample((2,3), mean_b, sd_b)
     ... ans = self.n2.sample((2,3), mean_a, sd_a)
-    ...
     """
     def __init__(self,
@@ -67,7 +67,6 @@ class TransformedDistribution(Distribution):
     ... # Similar calls can be made to other functions
     ... # by replacing 'sample' by the name of the function.
     ... ans = self.ln.sample(shape=(2, 3))
-    ...
     """
     def __init__(self,
@@ -46,7 +46,7 @@ class Uniform(Distribution):
     >>> # The following creates two independent Uniform distributions.
     >>> u = msd.Uniform([0.0, 0.0], [1.0, 2.0], dtype=mstype.float32)
     >>>
-    >>> # A Uniform distribution can be initilized without arguments.
+    >>> # A Uniform distribution can be initialized without arguments.
     >>> # In this case, `high` and `low` must be passed in through arguments during function calls.
     >>> u = msd.Uniform(dtype=mstype.float32)
     >>>
@@ -64,8 +64,8 @@ class Uniform(Distribution):
     ... # `prob`, `log_prob`, `cdf`, `log_cdf`, `survival_function`, and `log_survival`, have the same arguments.
     ... # Args:
     ... # value (Tensor): the value to be evaluated.
-    ... # low (Tensor): the lower bound of distribution. Default: self.low.
-    ... # high (Tensor): the higher bound of distribution. Default: self.high.
+    ... # low (Tensor): the lower bound of the distribution. Default: self.low.
+    ... # high (Tensor): the higher bound of the distribution. Default: self.high.
     ...
     ... # Examples of `prob`.
     ... # Similar calls can be made to other probability functions
@@ -79,8 +79,8 @@ class Uniform(Distribution):
     ...
     ... # Functions `mean`, `sd`, `var`, and `entropy` have the same arguments.
     ... # Args:
-    ... # low (Tensor): the lower bound of distribution. Default: self.low.
-    ... # high (Tensor): the higher bound of distribution. Default: self.high.
+    ... # low (Tensor): the lower bound of the distribution. Default: self.low.
+    ... # high (Tensor): the higher bound of the distribution. Default: self.high.
     ...
     ... # Examples of `mean`. `sd`, `var`, and `entropy` are similar.
     ... ans = self.u1.mean() # return 0.5
@@ -112,7 +112,6 @@ class Uniform(Distribution):
     ... ans = self.u1.sample((2,3))
     ... ans = self.u1.sample((2,3), low_b, high_b)
     ... ans = self.u2.sample((2,3), low_a, high_a)
-    ...
     """
     def __init__(self,
@@ -35,7 +35,7 @@ class Net(nn.Cell):
         return forward

 def test_forward():
-    power = 2
+    power = 2.
     x = np.array([2.0, 3.0, 4.0, 5.0], dtype=np.float32)
     tx = Tensor(x, dtype=dtype.float32)
     forward = Net(power=power)
@@ -57,7 +57,7 @@ class Net1(nn.Cell):
         return inverse

 def test_inverse():
-    power = 2
+    power = 2.
     y = np.array([2.0, 3.0, 4.0, 5.0], dtype=np.float32)
     ty = Tensor(y, dtype=dtype.float32)
     inverse = Net1(power=power)
@@ -78,7 +78,7 @@ class Net2(nn.Cell):
         return self.bijector.forward_log_jacobian(x_)

 def test_forward_jacobian():
-    power = 2
+    power = 2.
     x = np.array([2.0, 3.0, 4.0, 5.0], dtype=np.float32)
     tx = Tensor(x, dtype=dtype.float32)
     forward_jacobian = Net2(power=power)
@@ -99,7 +99,7 @@ class Net3(nn.Cell):
         return self.bijector.inverse_log_jacobian(y_)

 def test_inverse_jacobian():
-    power = 2
+    power = 2.
     y = np.array([2.0, 3.0, 4.0, 5.0], dtype=np.float32)
     ty = Tensor(y, dtype=dtype.float32)
     inverse_jacobian = Net3(power=power)
@@ -1,4 +1,4 @@
-# Copyright 2019 Huawei Technologies Co., Ltd
+# Copyright 2020 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -99,6 +99,14 @@ def test_arguments_same_type():
     assert isinstance(bijector, msb.Bijector)
     bijector = MyBijector(1.0, 2.0)
     assert isinstance(bijector, msb.Bijector)
+    with pytest.raises(TypeError):
+        MyBijector(1, 2)
+    with pytest.raises(TypeError):
+        MyBijector([1, 2], [2, 4])
+    with pytest.raises(TypeError):
+        MyBijector(np.array([1, 2]).astype(np.int32), np.array([1, 2]).astype(np.int32))
+    with pytest.raises(TypeError):
+        MyBijector(Tensor([1, 2], dtype=dtype.int32), Tensor([1, 2], dtype=dtype.int32))

 def test_arguments_with_dtype_specified():
     """
@@ -118,12 +126,20 @@ def test_arguments_with_dtype_specified():
         MySecondBijector(None, param2_2)
     param1_3 = Tensor(1.0, dtype=dtype.float32)
     param2_3 = Tensor(2.0, dtype=dtype.float32)
-    bijector = MyBijector(param1_3, param2_3)
+    bijector = MySecondBijector(param1_3, param2_3)
     assert isinstance(bijector, msb.Bijector)
     param1_4 = np.array(2.0).astype(np.float32)
     param2_4 = np.array(1.0).astype(np.float32)
-    bijector = MyBijector(param1_4, param2_4)
+    bijector = MySecondBijector(param1_4, param2_4)
     assert isinstance(bijector, msb.Bijector)
+    with pytest.raises(TypeError):
+        MySecondBijector(1, 2)
+    with pytest.raises(TypeError):
+        MySecondBijector([1, 2], [2, 4])
+    with pytest.raises(TypeError):
+        MySecondBijector(np.array([1, 2]).astype(np.int32), np.array([1, 2]).astype(np.int32))
+    with pytest.raises(TypeError):
+        MySecondBijector(Tensor([1, 2], dtype=dtype.int32), Tensor([1, 2], dtype=dtype.int32))

 class Net1(nn.Cell):
     """
@@ -1,4 +1,4 @@
-# Copyright 2019 Huawei Technologies Co., Ltd
+# Copyright 2020 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2019 Huawei Technologies Co., Ltd
+# Copyright 2020 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2019 Huawei Technologies Co., Ltd
+# Copyright 2020 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2019 Huawei Technologies Co., Ltd
+# Copyright 2020 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -22,7 +22,7 @@ from mindspore import dtype
 def test_init():
     b = msb.PowerTransform()
     assert isinstance(b, msb.Bijector)
-    b = msb.PowerTransform(1)
+    b = msb.PowerTransform(1.)
     assert isinstance(b, msb.Bijector)

 def test_type():
@@ -37,7 +37,7 @@ class Net(nn.Cell):
     """
     def __init__(self):
         super(Net, self).__init__()
-        self.b1 = msb.PowerTransform(power=0)
+        self.b1 = msb.PowerTransform(power=0.)
         self.b2 = msb.PowerTransform()

     def construct(self, x_):
@@ -60,7 +60,7 @@ class Jacobian(nn.Cell):
     """
     def __init__(self):
         super(Jacobian, self).__init__()
-        self.b1 = msb.PowerTransform(power=0)
+        self.b1 = msb.PowerTransform(power=0.)
         self.b2 = msb.PowerTransform()

     def construct(self, x_):
@@ -1,4 +1,4 @@
-# Copyright 2019 Huawei Technologies Co., Ltd
+# Copyright 2020 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2019 Huawei Technologies Co., Ltd
+# Copyright 2020 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
-# Copyright 2019 Huawei Technologies Co., Ltd
+# Copyright 2020 Huawei Technologies Co., Ltd
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.