Commit 7d9a7ab5 authored by Robert P. Goldman, committed by Thomas Wiecki

Fix docstrings.

Added raw-string prefixes (`r"""`) to the docstrings that were causing the deprecation warnings in issue #3878.
In passing, fixed some other minor docstring formatting issues.
parent 683faaa9
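For context, the warning being silenced comes from CPython itself: since Python 3.6, an unrecognized escape sequence such as `\m` in a non-raw string literal triggers a `DeprecationWarning` at compile time. A minimal reproduction (illustrative snippet, not part of the commit):

```python
import warnings

warnings.simplefilter("error", DeprecationWarning)

# The compiled source contains the invalid escape sequence \m ...
compile(r'x = "\mathit{mu}"', "<demo>", "exec")   # raises DeprecationWarning
# ... while a raw-string literal leaves the backslash alone.
compile(r'x = r"\mathit{mu}"', "<demo>", "exec")  # compiles cleanly
```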
@@ -103,7 +103,7 @@ def assert_negative_support(var, label, distname, value=-1e-6):
 def get_tau_sigma(tau=None, sigma=None):
-    """
+    r"""
     Find precision and standard deviation. The link between the two
     parameterizations is given by the inverse relationship:
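The relationship the docstring refers to is tau = 1/sigma^2. A sketch of the two directions (illustrative only; the library helper presumably also handles defaults and mutually exclusive arguments):

```python
def tau_from_sigma(sigma):
    return 1.0 / sigma**2

def sigma_from_tau(tau):
    return tau**-0.5
```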
@@ -770,7 +770,7 @@ class TruncatedNormal(BoundedContinuous):
         name = r'\text{%s}' % name
         return (
             r'${} \sim \text{{TruncatedNormal}}('
-            '\mathit{{mu}}={},~\mathit{{sigma}}={},a={},b={})$'
+            r'\mathit{{mu}}={},~\mathit{{sigma}}={},a={},b={})$'
             .format(
                 name,
                 get_variable_name(self.mu),
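This hunk is a reminder that the `r` prefix applies per string literal, not per expression: adjacent literals are concatenated, but each is parsed on its own, so the unprefixed second literal was still producing the warning. An illustrative example:

```python
# Only the first literal is raw; the second still parses \m as an
# escape sequence and warns at compile time.
s = (r'${} \sim \text{{TruncatedNormal}}('
     '\mathit{{mu}}={})$')

# With the r prefix on both literals, the backslashes survive verbatim.
s = (r'${} \sim \text{{TruncatedNormal}}('
     r'\mathit{{mu}}={})$')
```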
@@ -2968,7 +2968,7 @@ class Weibull(PositiveContinuous):
             get_variable_name(beta))
     def logcdf(self, value):
-        """
+        r"""
         Compute the log of the cumulative distribution function for Weibull distribution
         at the specified value.
@@ -4130,7 +4130,7 @@ class Logistic(Continuous):
             get_variable_name(s))
     def logcdf(self, value):
-        """
+        r"""
         Compute the log of the cumulative distribution function for Logistic distribution
         at the specified value.
@@ -80,7 +80,7 @@ class Binomial(Discrete):
         self.mode = tt.cast(tround(n * p), self.dtype)
     def random(self, point=None, size=None):
-        """
+        r"""
         Draw random values from Binomial distribution.
         Parameters
@@ -102,7 +102,7 @@ class Binomial(Discrete):
             size=size)
     def logp(self, value):
-        """
+        r"""
         Calculate log-probability of Binomial distribution at specified value.
         Parameters
@@ -215,7 +215,7 @@ class BetaBinomial(Discrete):
         return samples
     def random(self, point=None, size=None):
-        """
+        r"""
         Draw random values from BetaBinomial distribution.
         Parameters
@@ -238,7 +238,7 @@ class BetaBinomial(Discrete):
             size=size)
     def logp(self, value):
-        """
+        r"""
         Calculate log-probability of BetaBinomial distribution at specified value.
         Parameters
@@ -326,7 +326,7 @@ class Bernoulli(Discrete):
         self.mode = tt.cast(tround(self.p), 'int8')
     def random(self, point=None, size=None):
-        """
+        r"""
         Draw random values from Bernoulli distribution.
         Parameters
@@ -348,7 +348,7 @@ class Bernoulli(Discrete):
             size=size)
     def logp(self, value):
-        """
+        r"""
         Calculate log-probability of Bernoulli distribution at specified value.
         Parameters
@@ -427,7 +427,7 @@ class DiscreteWeibull(Discrete):
         self.median = self._ppf(0.5)
     def logp(self, value):
-        """
+        r"""
         Calculate log-probability of DiscreteWeibull distribution at specified value.
         Parameters
@@ -449,7 +449,7 @@ class DiscreteWeibull(Discrete):
             0 < beta)
     def _ppf(self, p):
-        """
+        r"""
         The percentile point function (the inverse of the cumulative
         distribution function) of the discrete Weibull distribution.
         """
@@ -464,7 +464,7 @@ class DiscreteWeibull(Discrete):
         return np.ceil(np.power(np.log(1 - p) / np.log(q), 1. / beta)) - 1
     def random(self, point=None, size=None):
-        """
+        r"""
         Draw random values from DiscreteWeibull distribution.
         Parameters
@@ -547,7 +547,7 @@ class Poisson(Discrete):
         self.mode = intX(tt.floor(mu))
     def random(self, point=None, size=None):
-        """
+        r"""
         Draw random values from Poisson distribution.
         Parameters
@@ -569,7 +569,7 @@ class Poisson(Discrete):
             size=size)
     def logp(self, value):
-        """
+        r"""
         Calculate log-probability of Poisson distribution at specified value.
         Parameters
@@ -656,7 +656,7 @@ class NegativeBinomial(Discrete):
         self.mode = intX(tt.floor(mu))
     def random(self, point=None, size=None):
-        """
+        r"""
         Draw random values from NegativeBinomial distribution.
         Parameters
@@ -680,7 +680,7 @@ class NegativeBinomial(Discrete):
         return np.asarray(stats.poisson.rvs(g)).reshape(g.shape)
     def _random(self, mu, alpha, size):
-        """ Wrapper around stats.gamma.rvs that converts NegativeBinomial's
+        r""" Wrapper around stats.gamma.rvs that converts NegativeBinomial's
         parametrization to scipy.gamma. All parameter arrays should have
         been broadcasted properly by generate_samples at this point and size is
         the scipy.rvs representation.
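For readers unfamiliar with the parametrization the wrapper converts: `NegativeBinomial(mu, alpha)` is a gamma-Poisson mixture, so a gamma draw with shape `alpha` and mean `mu` (scale `mu / alpha`) feeds the Poisson draw visible above. A standalone sketch (hypothetical helper name, not the library code):

```python
import numpy as np
from scipy import stats

def negative_binomial_rvs(mu, alpha, size=None):
    # Gamma-Poisson mixture: shape alpha, scale mu / alpha gives mean mu.
    g = stats.gamma.rvs(alpha, scale=np.asarray(mu) / alpha, size=size)
    return stats.poisson.rvs(g)
```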
@@ -692,7 +692,7 @@ class NegativeBinomial(Discrete):
         )
     def logp(self, value):
-        """
+        r"""
         Calculate log-probability of NegativeBinomial distribution at specified value.
         Parameters
@@ -771,7 +771,7 @@ class Geometric(Discrete):
         self.mode = 1
     def random(self, point=None, size=None):
-        """
+        r"""
         Draw random values from Geometric distribution.
         Parameters
@@ -793,7 +793,7 @@ class Geometric(Discrete):
             size=size)
     def logp(self, value):
-        """
+        r"""
         Calculate log-probability of Geometric distribution at specified value.
         Parameters
@@ -872,7 +872,7 @@ class DiscreteUniform(Discrete):
         return samples
     def random(self, point=None, size=None):
-        """
+        r"""
         Draw random values from DiscreteUniform distribution.
         Parameters
@@ -895,7 +895,7 @@ class DiscreteUniform(Discrete):
             size=size)
     def logp(self, value):
-        """
+        r"""
         Calculate log-probability of DiscreteUniform distribution at specified value.
         Parameters
@@ -975,7 +975,7 @@ class Categorical(Discrete):
         self.mode = tt.squeeze(self.mode)
     def random(self, point=None, size=None):
-        """
+        r"""
         Draw random values from Categorical distribution.
         Parameters
@@ -1001,7 +1001,7 @@ class Categorical(Discrete):
             size=size)
     def logp(self, value):
-        """
+        r"""
         Calculate log-probability of Categorical distribution at specified value.
         Parameters
@@ -1054,7 +1054,7 @@ class Categorical(Discrete):
 class Constant(Discrete):
-    """
+    r"""
     Constant log-likelihood.
     Parameters
@@ -1070,7 +1070,7 @@ class Constant(Discrete):
         self.mean = self.median = self.mode = self.c = c = tt.as_tensor_variable(c)
     def random(self, point=None, size=None):
-        """
+        r"""
         Draw random values from Constant distribution.
         Parameters
@@ -1096,7 +1096,7 @@ class Constant(Discrete):
             size=size).astype(dtype)
     def logp(self, value):
-        """
+        r"""
         Calculate log-probability of Constant distribution at specified value.
         Parameters
@@ -1180,7 +1180,7 @@ class ZeroInflatedPoisson(Discrete):
         self.mode = self.pois.mode
     def random(self, point=None, size=None):
-        """
+        r"""
         Draw random values from ZeroInflatedPoisson distribution.
         Parameters
@@ -1204,7 +1204,7 @@ class ZeroInflatedPoisson(Discrete):
         return g * (np.random.random(g.shape) < psi)
     def logp(self, value):
-        """
+        r"""
         Calculate log-probability of ZeroInflatedPoisson distribution at specified value.
         Parameters
@@ -1302,7 +1302,7 @@ class ZeroInflatedBinomial(Discrete):
         self.mode = self.bin.mode
     def random(self, point=None, size=None):
-        """
+        r"""
         Draw random values from ZeroInflatedBinomial distribution.
         Parameters
@@ -1326,7 +1326,7 @@ class ZeroInflatedBinomial(Discrete):
         return g * (np.random.random(g.shape) < psi)
     def logp(self, value):
-        """
+        r"""
         Calculate log-probability of ZeroInflatedBinomial distribution at specified value.
         Parameters
@@ -1448,7 +1448,7 @@ class ZeroInflatedNegativeBinomial(Discrete):
         self.mode = self.nb.mode
     def random(self, point=None, size=None):
-        """
+        r"""
         Draw random values from ZeroInflatedNegativeBinomial distribution.
         Parameters
@@ -1478,7 +1478,7 @@ class ZeroInflatedNegativeBinomial(Discrete):
         return stats.poisson.rvs(g) * (np.random.random(g.shape) < psi)
     def _random(self, mu, alpha, size):
-        """ Wrapper around stats.gamma.rvs that converts NegativeBinomial's
+        r""" Wrapper around stats.gamma.rvs that converts NegativeBinomial's
         parametrization to scipy.gamma. All parameter arrays should have
         been broadcasted properly by generate_samples at this point and size is
         the scipy.rvs representation.
@@ -1490,7 +1490,7 @@ class ZeroInflatedNegativeBinomial(Discrete):
         )
     def logp(self, value):
-        """
+        r"""
         Calculate log-probability of ZeroInflatedNegativeBinomial distribution at specified value.
         Parameters
@@ -224,7 +224,7 @@ class Latent(Base):
 @conditioned_vars(["X", "f", "nu"])
 class TP(Latent):
-    """
+    r"""
     Student's T process prior.
     The usage is nearly identical to that of `gp.Latent`. The differences
@@ -239,11 +239,11 @@ class TP(Latent):
     Parameters
     ----------
-    cov_func: None, 2D array, or instance of Covariance
+    cov_func : None, 2D array, or instance of Covariance
         The covariance function. Defaults to zero.
-    mean_func: None, instance of Mean
+    mean_func : None, instance of Mean
         The mean function. Defaults to zero.
-    nu: float
+    nu : float
         The degrees of freedom
     References
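As the docstring says, usage mirrors `gp.Latent`; a hedged sketch of what that looks like (variable names and values are illustrative):

```python
import numpy as np
import pymc3 as pm

X = np.linspace(0, 1, 20)[:, None]

with pm.Model():
    cov = pm.gp.cov.ExpQuad(1, ls=0.1)
    tp = pm.gp.TP(cov_func=cov, nu=3)
    f = tp.prior("f", X=X)
```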
@@ -37,12 +37,18 @@ from functools import reduce, partial
 def kronecker(*Ks):
-    """Return the Kronecker product of arguments:
+    r"""Return the Kronecker product of arguments:
     :math:`K_1 \otimes K_2 \otimes ... \otimes K_D`
     Parameters
     ----------
-    Ks: 2D array-like
+    Ks : Iterable of 2D array-like
         Arrays of which to take the product.
+    Returns
+    -------
+    np.ndarray :
+        Block matrix Kronecker product of the argument matrices.
     """
     return reduce(tt.slinalg.kron, Ks)
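The operation being documented, demonstrated in plain numpy (the PyMC3 version does the same reduction with Theano's `tt.slinalg.kron`):

```python
from functools import reduce
import numpy as np

K1, K2, K3 = np.eye(2), np.ones((2, 2)), np.diag([1.0, 2.0])
K = reduce(np.kron, [K1, K2, K3])
print(K.shape)  # (8, 8): the block-structured K1 ⊗ K2 ⊗ K3
```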
@@ -60,16 +66,20 @@ def cartesian(*arrays):
 def kron_matrix_op(krons, m, op):
-    """Apply op to krons and m in a way that reproduces ``op(kronecker(*krons), m)``
+    r"""Apply op to krons and m in a way that reproduces ``op(kronecker(*krons), m)``
     Parameters
     -----------
-    krons: list of square 2D array-like objects
-        D square matrices :math:`[A_1, A_2, ..., A_D]` to be Kronecker'ed
-        :math:`A = A_1 \otimes A_2 \otimes ... \otimes A_D`
-        Product of column dimensions must be :math:`N`
-    m : NxM array or 1D array (treated as Nx1)
-        Object that krons act upon
+    krons : list of square 2D array-like objects
+        D square matrices :math:`[A_1, A_2, ..., A_D]` to be Kronecker'ed
+        :math:`A = A_1 \otimes A_2 \otimes ... \otimes A_D`
+        Product of column dimensions must be :math:`N`
+    m : NxM array or 1D array (treated as Nx1)
+        Object that krons act upon
+    Returns
+    -------
+    numpy array
     """
     def flat_matrix_op(flat_mat, mat):
         Nmat = mat.shape[1]
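The reason a routine like `kron_matrix_op` exists is the standard vec identity, which lets one multiply by a Kronecker product without ever forming it. A numpy illustration (not the library implementation):

```python
import numpy as np

rng = np.random.default_rng(0)
A, B = rng.random((3, 3)), rng.random((4, 4))
X = rng.random((4, 3))

# (A ⊗ B) vec(X) == vec(B X A^T), with column-major (Fortran-order) vec.
lhs = np.kron(A, B) @ X.flatten(order="F")
rhs = (B @ X @ A.T).flatten(order="F")
assert np.allclose(lhs, rhs)
```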
@@ -25,7 +25,7 @@ floatX = theano.config.floatX
 class DifferentialEquation(theano.Op):
-    """
+    r"""
     Specify an ordinary differential equation
     .. math::
@@ -94,14 +94,20 @@ class DifferentialEquation(theano.Op):
         # Cache symbolic sensitivities by the hash of inputs
         self._apply_nodes = {}
         self._output_sensitivities = {}
-    def _system(self, Y, t, p):
-        """This is the function that will be passed to odeint. Solves both ODE and sensitivities.
-        Args:
-            Y: augmented state vector (n_states + n_states + n_theta)
-            t: current time
-            p: parameter vector (y0, theta)
+    # FIXME: what are the types of these arguments? Couldn't guess when I was reformatting the
+    # docstring. [2020/04/11:rpg]
+    def _system(self, Y, t, p):
+        r"""This is the function that will be passed to odeint. Solves both ODE and sensitivities.
+        Parameters
+        ----------
+        Y :
+            augmented state vector (n_states + n_states + n_theta)
+        t :
+            current time
+        p :
+            parameter vector (y0, theta)
         """
         dydt, ddt_dydp = self._augmented_func(Y[:self.n_states], t, p, Y[self.n_states:])
         derivatives = np.concatenate([dydt, ddt_dydp])
@@ -18,7 +18,7 @@ import theano.tensor as tt
 def make_sens_ic(n_states, n_theta, floatX):
-    """
+    r"""
     The sensitivity matrix will always have consistent form. (n_states, n_states + n_theta)
     If the first n_states entries of the parameters vector in the simulate call
@@ -32,16 +32,16 @@ def make_sens_ic(n_states, n_theta, floatX):
     Parameters
     ----------
-    n_states: int
+    n_states : int
         Number of state variables in the ODE
-    n_theta: int
+    n_theta : int
         Number of ODE parameters
-    floatX: str
+    floatX : str
         dtype to be used for the array
     Returns
     -------
-    dydp: array
+    dydp : array
         1D-array of shape (n_states * (n_states + n_theta),), representing the initial condition of the sensitivities
     """
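A guess at the construction the docstring describes, to make the shape concrete (hypothetical stand-in, not the library code): the sensitivities of the initial state are the identity with respect to the `y0` block and zero with respect to the `theta` block.

```python
import numpy as np

def sens_ic_sketch(n_states, n_theta, floatX="float64"):
    dydp = np.zeros((n_states, n_states + n_theta), dtype=floatX)
    dydp[:, :n_states] = np.eye(n_states, dtype=floatX)  # d y0 / d y0 = I
    return dydp.flatten()  # shape (n_states * (n_states + n_theta),)
```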
@@ -33,7 +33,7 @@ def sample_smc(
     model=None,
     random_seed=-1,
 ):
-    """
+    r"""
     Sequential Monte Carlo based sampling
     Parameters
@@ -24,7 +24,7 @@ LATEX_ESCAPE_RE = re.compile(r'(%|_|\$|#|&)', re.MULTILINE)
 def escape_latex(strng):
-    """Consistently escape LaTeX special characters for _repr_latex_ in IPython
+    r"""Consistently escape LaTeX special characters for _repr_latex_ in IPython
     Implementation taken from the IPython magic `format_latex`
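Given the `LATEX_ESCAPE_RE` visible in the hunk context, the escaping presumably reduces to a backslash-prefixing substitution; a sketch (hypothetical stand-in for the real function):

```python
import re

LATEX_ESCAPE_RE = re.compile(r'(%|_|\$|#|&)', re.MULTILINE)

def escape_latex_sketch(strng):
    # Prefix each LaTeX special character with a backslash.
    return LATEX_ESCAPE_RE.sub(r'\\\1', strng)

print(escape_latex_sketch("sigma_log__"))  # sigma\_log\_\_
```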
@@ -48,7 +48,7 @@ def escape_latex(strng):
 def get_transformed_name(name, transform):
-    """
+    r"""
     Consistent way of transforming names
     Parameters
@@ -67,7 +67,7 @@ def get_transformed_name(name, transform):
 def is_transformed_name(name):
-    """
+    r"""
     Quickly check if a name was transformed with `get_transformed_name`
     Parameters
@@ -84,7 +84,7 @@ def is_transformed_name(name):
 def get_untransformed_name(name):
-    """
+    r"""
     Undo transformation in `get_transformed_name`. Throws ValueError if name wasn't transformed
     Parameters
@@ -103,7 +103,7 @@ def get_untransformed_name(name):
 def get_default_varnames(var_iterator, include_transformed):
-    """Helper to extract default varnames from a trace.
+    r"""Helper to extract default varnames from a trace.
     Parameters
     ----------
@@ -124,7 +124,7 @@ def get_default_varnames(var_iterator, include_transformed):
 def get_variable_name(variable):
-    """Returns the variable data type if it is a constant, otherwise
+    r"""Returns the variable data type if it is a constant, otherwise
     returns the argument name.
     """
     name = variable.name
@@ -145,7 +145,7 @@ def get_variable_name(variable):
 def update_start_vals(a, b, model):
-    """Update a with b, without overwriting existing keys. Values specified for
+    r"""Update a with b, without overwriting existing keys. Values specified for
     transformed variables on the original scale are also transformed and inserted.
     """
     if model is not None:
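The non-overwriting merge described here amounts to the following (sketch only; the transform handling mentioned in the docstring is omitted):

```python
def update_without_overwrite(a, b):
    a.update({name: value for name, value in b.items() if name not in a})
```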
@@ -292,7 +292,7 @@ class Inference:
 class KLqp(Inference):
-    """**Kullback Leibler Divergence Inference**
+    r"""**Kullback Leibler Divergence Inference**
     General approach to fit Approximations that define :math:`logq`
     by maximizing ELBO (Evidence Lower Bound). In some cases
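For reference, the objective named here is the usual evidence lower bound, whose maximization over `q` is equivalent to minimizing the KL divergence from `q` to the posterior:

```latex
\mathrm{ELBO}(q) = \mathbb{E}_{q(z)}\bigl[\log p(x, z) - \log q(z)\bigr]
                 = \log p(x) - \mathrm{KL}\bigl(q(z) \,\|\, p(z \mid x)\bigr)
```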
@@ -164,9 +164,9 @@ class ObjectiveFunction:
     Parameters
     ----------
-    op: :class:`Operator`
+    op : :class:`Operator`
         OPVI Functional operator
-    tf: :class:`TestFunction`
+    tf : :class:`TestFunction`
         OPVI TestFunction
     """
@@ -186,23 +186,23 @@ class ObjectiveFunction:
         Parameters
         ----------
-        obj_n_mc: `int`
+        obj_n_mc : int
             Number of monte carlo samples used for approximation of objective gradients
-        tf_n_mc: `int`
+        tf_n_mc : int
             Number of monte carlo samples used for approximation of test function gradients
-        obj_optimizer: function (loss, params) -> updates
+        obj_optimizer : function (loss, params) -> updates
             Optimizer that is used for objective params
-        test_optimizer: function (loss, params) -> updates
+        test_optimizer : function (loss, params) -> updates
            Optimizer that is used for test function params
-        more_obj_params: `list`
+        more_obj_params : list
            Add custom params for objective optimizer
-        more_tf_params: `list`
+        more_tf_params : list
            Add custom params for test function optimizer
-        more_updates: `dict`
+        more_updates : dict
            Add custom updates to resulting updates
-        more_replacements: `dict`
+        more_replacements : dict
            Apply custom replacements before calculating gradients
-        total_grad_norm_constraint: `float`
+        total_grad_norm_constraint : float
            Bounds gradient norm, prevents exploding gradient problem
         Returns
@@ -681,22 +681,22 @@ def rmsprop(loss_or_grads=None, params=None,
 def adadelta(loss_or_grads=None, params=None,
              learning_rate=1.0, rho=0.95, epsilon=1e-6):
-    """ Adadelta updates
+    r""" Adadelta updates
     Scale learning rates by the ratio of accumulated gradients to accumulated
     updates, see [1]_ and notes for further description.
     Parameters
     ----------
-    loss_or_grads: symbolic expression or list of expressions
+    loss_or_grads : symbolic expression or list of expressions
         A scalar loss expression, or a list of gradient expressions
-    params: list of shared variables
+    params : list of shared variables
         The variables to generate update expressions for
-    learning_rate: float or symbolic scalar
+    learning_rate : float or symbolic scalar
         The learning rate controlling the size of update steps
-    rho: float or symbolic scalar
+    rho : float or symbolic scalar
        Squared gradient moving average decay factor
-    epsilon: float or symbolic scalar
+    epsilon : float or symbolic scalar
        Small value added for numerical stability
     Returns
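The update rule these parameters feed, written out in plain numpy (following Zeiler 2012, the reference the docstring cites; an illustrative restatement, not the Theano implementation):

```python
import numpy as np

def adadelta_step(param, grad, accu, delta_accu,
                  learning_rate=1.0, rho=0.95, epsilon=1e-6):
    # Moving average of squared gradients.
    accu = rho * accu + (1 - rho) * grad**2
    # Scale the gradient by RMS(previous updates) / RMS(gradients).
    update = grad * np.sqrt(delta_accu + epsilon) / np.sqrt(accu + epsilon)
    param = param - learning_rate * update
    # Moving average of squared updates.
    delta_accu = rho * delta_accu + (1 - rho) * update**2
    return param, accu, delta_accu
```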