Skip to content

Run black on core notebooks #5901

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Jun 17, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ repos:
rev: 22.3.0
hooks:
- id: black
- id: black-jupyter
- repo: https://github.com/PyCQA/pylint
rev: v2.14.0
hooks:
Expand Down
1 change: 1 addition & 0 deletions conda-envs/environment-dev-py37.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ dependencies:
- h5py>=2.7
- ipython>=7.16
- jax
- jupyter-sphinx
- myst-nb
- numpy>=1.15.0
- numpydoc
Expand Down
1 change: 1 addition & 0 deletions conda-envs/environment-dev-py38.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ dependencies:
- h5py>=2.7
- ipython>=7.16
- jax
- jupyter-sphinx
- myst-nb
- numpy>=1.15.0
- numpydoc
Expand Down
1 change: 1 addition & 0 deletions conda-envs/environment-dev-py39.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ dependencies:
- h5py>=2.7
- ipython>=7.16
- jax
- jupyter-sphinx
- myst-nb
- numpy>=1.15.0
- numpydoc
Expand Down
1 change: 1 addition & 0 deletions conda-envs/windows-environment-dev-py38.yml
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ dependencies:
- cloudpickle
- fastprogress>=0.2.0
- h5py>=2.7
- jupyter-sphinx
- numpy>=1.15.0
- pandas>=0.24.0
- pip
Expand Down
6 changes: 3 additions & 3 deletions docs/source/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,13 +144,13 @@
"about/featured_testimonials.md",
]

# myst and panels config
jupyter_execute_notebooks = "force"
# myst config
nb_execution_mode = "force"
nb_kernel_rgx_aliases = {".*": "python3"}
myst_enable_extensions = ["colon_fence", "deflist", "dollarmath", "amsmath", "substitution"]
myst_substitutions = {
"version_slug": rtd_version,
}
panels_add_bootstrap_css = False
myst_heading_anchors = None

rediraffe_redirects = {
Expand Down
2 changes: 1 addition & 1 deletion docs/source/learn/core_notebooks/GLM_linear.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -402,7 +402,7 @@
"metadata": {},
"outputs": [],
"source": [
"idata.posterior[\"y_model\"] = idata.posterior[\"Intercept\"] + idata.posterior[\"x\"]*xr.DataArray(x)"
"idata.posterior[\"y_model\"] = idata.posterior[\"Intercept\"] + idata.posterior[\"x\"] * xr.DataArray(x)"
]
},
{
Expand Down
34 changes: 19 additions & 15 deletions docs/source/learn/core_notebooks/dimensionality.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -160,7 +160,7 @@
}
],
"source": [
"random_sample = pm.Normal.dist(mu=[1,10,100], sigma=.0001).eval()\n",
"random_sample = pm.Normal.dist(mu=[1, 10, 100], sigma=0.0001).eval()\n",
"random_sample, random_sample.shape"
]
},
Expand Down Expand Up @@ -310,11 +310,11 @@
],
"source": [
"with pm.Model() as pmodel:\n",
" pm.Normal(\"scalar\") # shape=()\n",
" pm.Normal(\"vector (implied)\", mu=[1,2,3])\n",
" pm.Normal(\"scalar\") # shape=()\n",
" pm.Normal(\"vector (implied)\", mu=[1, 2, 3])\n",
" pm.Normal(\"vector (from shape)\", shape=(4,))\n",
" pm.Normal(\"vector (from size)\", size=(5,))\n",
" \n",
"\n",
"pm.model_to_graphviz(pmodel)"
]
},
Expand Down Expand Up @@ -401,7 +401,7 @@
"with pm.Model() as pmodel:\n",
" pm.Normal(\"red\", size=2, dims=\"B\")\n",
"\n",
" pm.Normal(\"one\", [1,2,3,4], dims=\"Dim_A\") # (4,)\n",
" pm.Normal(\"one\", [1, 2, 3, 4], dims=\"Dim_A\") # (4,)\n",
" pm.Normal(\"two\", dims=\"Dim_A\")\n",
"\n",
"\n",
Expand All @@ -421,10 +421,12 @@
"metadata": {},
"outputs": [],
"source": [
"with pm.Model(coords={\n",
" \"year\": [2020, 2021, 2022],\n",
"}) as pmodel:\n",
" \n",
"with pm.Model(\n",
" coords={\n",
" \"year\": [2020, 2021, 2022],\n",
" }\n",
") as pmodel:\n",
"\n",
" pm.Normal(\"Normal_RV\", dims=\"year\")\n",
"\n",
" pm.model_to_graphviz(pmodel)"
Expand Down Expand Up @@ -483,7 +485,7 @@
}
],
"source": [
"pm.MvNormal.dist(mu=[[1,2,3], [4,5,6]], cov=np.eye(3)*.0001).eval()"
"pm.MvNormal.dist(mu=[[1, 2, 3], [4, 5, 6]], cov=np.eye(3) * 0.0001).eval()"
]
},
{
Expand Down Expand Up @@ -587,9 +589,11 @@
}
],
"source": [
"with pm.Model(coords={\n",
" \"year\": [2020, 2021, 2022],\n",
"}) as pmodel:\n",
"with pm.Model(\n",
" coords={\n",
" \"year\": [2020, 2021, 2022],\n",
" }\n",
") as pmodel:\n",
" mv = pm.MvNormal(\"implied\", mu=[0, 0, 0], cov=np.eye(3))\n",
" print(mv.shape.eval())\n",
"\n",
Expand All @@ -598,11 +602,11 @@
"\n",
" mv = pm.MvNormal(\"with size\", mu=[0, 0], cov=np.eye(2), size=3, dims=(\"repeats\", \"implied\"))\n",
" print(mv.shape.eval())\n",
" \n",
"\n",
" # ⚠ Size dims are always __prepended__\n",
" mv = pm.MvNormal(\"with shape\", mu=[0, 0], cov=np.eye(2), shape=(3, ...), dims=(\"repeats\", ...))\n",
" print(mv.shape.eval())\n",
" \n",
"\n",
" mv = pm.MvNormal(\"with coords\", mu=[0, 0], cov=np.eye(2), dims=(\"year\", ...))\n",
" print(mv.shape.eval())\n",
"\n",
Expand Down
21 changes: 15 additions & 6 deletions docs/source/learn/core_notebooks/posterior_predictive.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@
"RANDOM_SEED = 58\n",
"rng = np.random.default_rng(RANDOM_SEED)\n",
"\n",
"\n",
"def standardize(series):\n",
" \"\"\"Standardize a pandas series\"\"\"\n",
" return (series - series.mean()) / series.std()"
Expand Down Expand Up @@ -939,17 +940,20 @@
"source": [
"_, ax = plt.subplots()\n",
"\n",
"ax.plot(predictor_scaled, mu_pp.mean((\"chain\", \"draw\")), label=\"Mean outcome\", color=\"C1\", alpha=0.6);\n",
"ax.plot(\n",
" predictor_scaled, mu_pp.mean((\"chain\", \"draw\")), label=\"Mean outcome\", color=\"C1\", alpha=0.6\n",
")\n",
"az.plot_lm(\n",
" idata=idata, \n",
" y=\"obs\", \n",
" x=predictor_scaled, \n",
" idata=idata,\n",
" y=\"obs\",\n",
" x=predictor_scaled,\n",
" kind_pp=\"hdi\",\n",
" y_kwargs={\"color\": \"C0\", \"marker\": \"o\", \"ms\": 4, \"alpha\": 0.4},\n",
" y_hat_fill_kwargs=dict(fill_kwargs={\"alpha\": 0.8}, color=\"xkcd:jade\"),\n",
" axes=ax,\n",
")\n",
"ax.set_xlabel(\"Predictor (stdz)\"); ax.set_ylabel(\"Outcome (stdz)\");"
"ax.set_xlabel(\"Predictor (stdz)\")\n",
"ax.set_ylabel(\"Outcome (stdz)\");"
]
},
{
Expand Down Expand Up @@ -1221,7 +1225,12 @@
" pm.set_data({\"pred\": predictors_out_of_sample})\n",
" # use the updated values and predict outcomes and probabilities:\n",
" idata_2 = pm.sample_posterior_predictive(\n",
" idata_2, var_names=[\"p\"], return_inferencedata=True, predictions=True, extend_inferencedata=True, random_seed=rng,\n",
" idata_2,\n",
" var_names=[\"p\"],\n",
" return_inferencedata=True,\n",
" predictions=True,\n",
" extend_inferencedata=True,\n",
" random_seed=rng,\n",
" )"
]
},
Expand Down
36 changes: 22 additions & 14 deletions docs/source/learn/core_notebooks/pymc_aesara.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -61,10 +61,12 @@
"import scipy.stats\n",
"\n",
"\n",
"print(f\"\"\"\n",
"print(\n",
" f\"\"\"\n",
"# Aesara version: {aesara.__version__}\n",
"# PyMC version: {pm.__version__}\n",
"\"\"\")"
"\"\"\"\n",
")"
]
},
{
Expand Down Expand Up @@ -133,13 +135,15 @@
"x = at.scalar(name=\"x\")\n",
"y = at.vector(name=\"y\")\n",
"\n",
"print(f\"\"\"\n",
"print(\n",
" f\"\"\"\n",
"x type: {x.type}\n",
"x name = {x.name}\n",
"---\n",
"y type: {y.type}\n",
"y name = {y.name}\n",
"\"\"\")"
"\"\"\"\n",
")"
]
},
{
Expand Down Expand Up @@ -331,7 +335,7 @@
}
],
"source": [
"w.eval({x: 0, y:[1, np.e]})"
"w.eval({x: 0, y: [1, np.e]})"
]
},
{
Expand Down Expand Up @@ -566,14 +570,16 @@
}
],
"source": [
"print(f\"\"\"\n",
"print(\n",
" f\"\"\"\n",
"z type: {z.type}\n",
"z name = {z.name}\n",
"z owner = {z.owner}\n",
"z owner inputs = {z.owner.inputs}\n",
"z owner op = {z.owner.op}\n",
"z owner output = {z.owner.outputs}\n",
"\"\"\")"
"\"\"\"\n",
")"
]
},
{
Expand Down Expand Up @@ -746,8 +752,8 @@
},
"outputs": [],
"source": [
"parent_of_w = w.owner.inputs[0] # get z tensor\n",
"new_parent_of_w = at.exp(parent_of_w) # modify the parent of w\n",
"parent_of_w = w.owner.inputs[0] # get z tensor\n",
"new_parent_of_w = at.exp(parent_of_w) # modify the parent of w\n",
"new_parent_of_w.name = \"exp(x + y)\""
]
},
Expand Down Expand Up @@ -878,7 +884,7 @@
}
],
"source": [
"new_w.eval({x: 0, y:[1, np.e]})"
"new_w.eval({x: 0, y: [1, np.e]})"
]
},
{
Expand Down Expand Up @@ -2050,7 +2056,7 @@
}
],
"source": [
" # Equivalent to rv_draw = pm.draw(rv, 3)\n",
"# Equivalent to rv_draw = pm.draw(rv, 3)\n",
"rv.rvs(3)"
]
},
Expand Down Expand Up @@ -2215,7 +2221,7 @@
"# element-wise log-probability of the model (we do not take te sum)\n",
"logp_graph = at.stack(model_2.logp(sum=False))\n",
"# evaluate by passing concrete values\n",
"logp_graph.eval({mu_value: 0, sigma_log_value: -10, x_value:0})"
"logp_graph.eval({mu_value: 0, sigma_log_value: -10, x_value: 0})"
]
},
{
Expand Down Expand Up @@ -2251,11 +2257,13 @@
}
],
"source": [
"print(f\"\"\"\n",
"print(\n",
" f\"\"\"\n",
"mu_value -> {scipy.stats.norm.logpdf(x=0, loc=0, scale=2)}\n",
"sigma_log_value -> {- 10 + scipy.stats.halfnorm.logpdf(x=np.exp(-10), loc=0, scale=3)} \n",
"x_value -> {scipy.stats.norm.logpdf(x=0, loc=0, scale=np.exp(-10))}\n",
"\"\"\")\n"
"\"\"\"\n",
")"
]
},
{
Expand Down
Loading