{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Uncertainties\n", "\n", "\n", "```{autolink-concat}\n", "```" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Import Python libraries" }, "tags": [ "hide-cell", "scroll-input" ] }, "outputs": [], "source": [ "from __future__ import annotations\n", "\n", "import itertools\n", "import json\n", "import logging\n", "import os\n", "import tarfile\n", "from collections import defaultdict\n", "from textwrap import wrap\n", "\n", "import jax.numpy as jnp\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import plotly.graph_objects as go\n", "import sympy as sp\n", "import yaml\n", "from ampform.kinematics.phasespace import is_within_phasespace\n", "from ampform.sympy import PoolSum\n", "from IPython.display import Latex, Markdown\n", "from matplotlib import cm\n", "from matplotlib.ticker import FuncFormatter, MultipleLocator\n", "from plotly.subplots import make_subplots\n", "from scipy.interpolate import griddata\n", "from tensorwaves.function.sympy import create_function\n", "from tensorwaves.interface import DataSample, ParametrizedFunction\n", "from tqdm.auto import tqdm\n", "\n", "from polarimetry import formulate_polarimetry\n", "from polarimetry.amplitude import AmplitudeModel\n", "from polarimetry.data import (\n", " create_data_transformer,\n", " generate_meshgrid_sample,\n", " generate_phasespace_sample,\n", ")\n", "from polarimetry.function import integrate_intensity, sub_intensity\n", "from polarimetry.io import (\n", " export_polarimetry_field,\n", " mute_jax_warnings,\n", " perform_cached_doit,\n", " perform_cached_lambdify,\n", ")\n", "from polarimetry.lhcb import (\n", " ParameterBootstrap,\n", " load_model_builder,\n", " load_model_parameters,\n", ")\n", "from polarimetry.lhcb.particle import load_particles\n", "from polarimetry.plot import add_watermark, use_mpl_latex_fonts\n", "\n", "logging.getLogger(\"polarimetry.io\").setLevel(logging.INFO)\n", "mute_jax_warnings()\n", "FUNCTION_CACHE: dict[sp.Expr, ParametrizedFunction] = {}\n", "UNFOLDED_POOLSUM_CACHE: dict[PoolSum, sp.Expr] = {}\n", "\n", "NO_TQDM = \"EXECUTE_NB\" in os.environ\n", "if NO_TQDM:\n", " logging.getLogger().setLevel(logging.ERROR)\n", " logging.getLogger(\"polarimetry.io\").setLevel(logging.ERROR)" ] }, { "cell_type": "markdown", "metadata": { "tags": [] }, "source": [ "## Model loading" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Formulate models" }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "model_file = \"../data/model-definitions.yaml\"\n", "particles = load_particles(\"../data/particle-definitions.yaml\")\n", "reference_subsystem = 1\n", "with open(model_file) as f:\n", " model_titles = list(yaml.safe_load(f))\n", "\n", "models = {}\n", "for title in tqdm(model_titles, desc=\"Formulating models\", disable=NO_TQDM):\n", " amplitude_builder = load_model_builder(model_file, particles, model_id=title)\n", " model = amplitude_builder.formulate(reference_subsystem)\n", " imported_parameter_values = load_model_parameters(\n", " model_file, model.decay, title, particles\n", " )\n", " model.parameter_defaults.update(imported_parameter_values)\n", " models[title] = model" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Unfold symbolic expressions" 
}, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "def unfold_poolsum(expr: PoolSum) -> sp.Expr:\n", " unfolded_expr = UNFOLDED_POOLSUM_CACHE.get(expr)\n", " if unfolded_expr is None:\n", " unfolded_expr = expr.doit()\n", " UNFOLDED_POOLSUM_CACHE[expr] = unfolded_expr\n", " return unfolded_expr\n", " return unfolded_expr\n", "\n", "\n", "nominal_model_title = \"Default amplitude model\"\n", "nominal_model = models[nominal_model_title]\n", "unfolded_exprs = {}\n", "for title, model in tqdm(\n", " models.items(), desc=\"Unfolding expressions\", disable=NO_TQDM\n", "):\n", " amplitude_builder = load_model_builder(model_file, particles, model_id=title)\n", " polarimetry_exprs = formulate_polarimetry(amplitude_builder, reference_subsystem)\n", " folded_exprs = {f\"alpha_{i}\": expr for i, expr in zip(\"xyz\", polarimetry_exprs)}\n", " folded_exprs[\"intensity\"] = model.intensity\n", " unfolded_exprs[title] = {\n", " key: perform_cached_doit(unfold_poolsum(expr).xreplace(model.amplitudes))\n", " for key, expr in tqdm(\n", " folded_exprs.items(), disable=NO_TQDM, leave=False, postfix=title\n", " )\n", " }" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Identify unique expressions" }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "expression_hashes = {\n", " title: hash(exprs[\"intensity\"]) for title, exprs in unfolded_exprs.items()\n", "}\n", "table = R\"\"\"\n", "$$\n", "\\begin{array}{rllr}\n", " && \\textbf{Model description} & n\\textbf{ ops.} \\\\\n", " \\hline\n", "\"\"\"\n", "for i, (title, expressions) in enumerate(unfolded_exprs.items()):\n", " expr = expressions[\"intensity\"]\n", " h = hash(expr)\n", " for j, v in enumerate(expression_hashes.values()):\n", " if h == v:\n", " break\n", " same_as = \"\"\n", " if i != j:\n", " same_as = f\"= {j}\"\n", " ops = sp.count_ops(expr)\n", " title = \"\".join(Rf\"\\text{{{t}}}\\\\ \" for t in wrap(title, width=56))\n", " title = Rf\"\\begin{{array}}{{l}}{title}\\end{{array}}\"\n", " title = title.replace(\"^\", R\"\\^{}\")\n", " table += Rf\" \\mathbf{{{i}}} & {same_as} & {title} & {ops:,} \\\\ \\hline\" \"\\n\"\n", "table += R\"\"\"\\end{array}\n", "$$\n", "\"\"\"\n", "\n", "n_models = len(models)\n", "n_unique_hashes = len(set(expression_hashes.values()))\n", "src = Rf\"\"\"\n", "Of the {n_models} models, there are {n_unique_hashes} with a unique expression tree.\n", "\"\"\"\n", "if NO_TQDM:\n", " src += \"\\n:::{dropdown} Show number of mathematical operations per model\\n\"\n", "src += table\n", "if NO_TQDM:\n", " src += \"\\n:::\"\n", "Markdown(src.strip())" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Convert to numerical functions" }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "def cached_lambdify(expr: sp.Expr, model: AmplitudeModel) -> ParametrizedFunction:\n", " func = FUNCTION_CACHE.get(expr)\n", " if func is None:\n", " func = perform_cached_lambdify(\n", " expr,\n", " parameters=model.parameter_defaults,\n", " backend=\"jax\",\n", " )\n", " FUNCTION_CACHE[expr] = func\n", " str_parameters = {str(k): v for k, v in model.parameter_defaults.items()}\n", " func.update_parameters(str_parameters)\n", " return func\n", "\n", "\n", "jax_functions = {}\n", "original_parameters: dict[str, dict[str, complex | float | int]] = {}\n", "progress_bar = tqdm(desc=\"Lambdifying to JAX\", disable=NO_TQDM, total=len(models))\n", "for title, 
model in models.items():\n", " progress_bar.set_postfix_str(title)\n", " jax_functions[title] = {\n", " key: cached_lambdify(expr, model)\n", " for key, expr in unfolded_exprs[title].items()\n", " }\n", " original_parameters[title] = dict(jax_functions[title][\"intensity\"].parameters)\n", " progress_bar.update()\n", "progress_bar.set_postfix_str(\"\")\n", "progress_bar.close()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Statistical uncertainties\n", "\n", "### Parameter bootstrapping" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "n_bootstraps = 100\n", "nominal_functions = jax_functions[nominal_model_title]\n", "bootstrap = ParameterBootstrap(model_file, nominal_model.decay, nominal_model_title)\n", "bootstrap_parameters = bootstrap.create_distribution(n_bootstraps, seed=0)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "n_events = 100_000\n", "transformer = create_data_transformer(nominal_model)\n", "phsp_sample = generate_phasespace_sample(nominal_model.decay, n_events, seed=0)\n", "phsp_sample = transformer(phsp_sample)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Compute polarimeter field and intensities (statistics & systematics)" }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "resonances = [chain.resonance for chain in nominal_model.decay.chains]\n", "nominal_parameters = dict(original_parameters[nominal_model_title])\n", "stat_grids = defaultdict(list)\n", "stat_decay_rates = defaultdict(list)\n", "for i in tqdm(\n", " range(n_bootstraps),\n", " desc=\"Computing polarimetry and intensities for parameter combinations\",\n", " disable=NO_TQDM,\n", "):\n", " new_parameters = {k: v[i] for k, v in bootstrap_parameters.items()}\n", " for key, func in nominal_functions.items():\n", " func.update_parameters(nominal_parameters)\n", " func.update_parameters(new_parameters)\n", " stat_grids[key].append(func(phsp_sample).real)\n", " I_tot = integrate_intensity(stat_grids[\"intensity\"][-1])\n", " for resonance in resonances:\n", " res_filter = resonance.name.replace(\"(\", R\"\\(\").replace(\")\", R\"\\)\")\n", " I_sub = sub_intensity(\n", " nominal_functions[\"intensity\"], phsp_sample, [res_filter]\n", " )\n", " stat_decay_rates[resonance.name].append(I_sub / I_tot)\n", "\n", "stat_intensities = jnp.array(stat_grids[\"intensity\"])\n", "stat_polarimetry = jnp.array([stat_grids[f\"alpha_{i}\"] for i in \"xyz\"])\n", "stat_polarimetry_norm = jnp.sqrt(jnp.sum(stat_polarimetry**2, axis=0))\n", "stat_decay_rates = {k: jnp.array(v) for k, v in stat_decay_rates.items()}" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Mean and standard deviations" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "assert stat_intensities.shape == (n_bootstraps, n_events)\n", "assert stat_polarimetry.shape == (3, n_bootstraps, n_events)\n", "assert stat_polarimetry_norm.shape == (n_bootstraps, n_events)\n", "n_bootstraps, n_events" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "remove-cell" ] }, "outputs": [], "source": [ "bootstrap_selection = slice(0, 4)\n", "event_selection = slice(0, 3)\n", "np.testing.assert_almost_equal(\n", " np.array(stat_intensities[bootstrap_selection, event_selection]),\n", " [\n", " [2181.869539, 5457.018803, 6252.331785],\n", " 
[2272.483121, 5683.29221, 6329.830268],\n", " [2036.791629, 5087.462553, 6125.017938],\n", " [2053.192849, 5263.222729, 6142.861638],\n", " ],\n", " decimal=6,\n", ")\n", "np.testing.assert_almost_equal(\n", " np.array(stat_polarimetry_norm[bootstrap_selection, 0]),\n", " [0.71179, 0.709137, 0.71447, 0.734856],\n", " decimal=6,\n", ")\n", "del bootstrap_selection, event_selection" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Compute statistical measures (mean, std, etc.)" }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "stat_alpha_mean = [\n", " jnp.mean(stat_polarimetry_norm, axis=0),\n", " *jnp.mean(stat_polarimetry, axis=1),\n", "]\n", "stat_alpha_std = [\n", " jnp.std(stat_polarimetry_norm, axis=0),\n", " *jnp.std(stat_polarimetry, axis=1),\n", "]\n", "\n", "stat_alpha_times_I_mean = [\n", " jnp.mean(stat_polarimetry_norm * stat_intensities, axis=0),\n", " *jnp.mean(stat_polarimetry * stat_intensities, axis=1),\n", "]\n", "stat_alpha_times_I_std = [\n", " jnp.std(stat_polarimetry_norm * stat_intensities, axis=0),\n", " *jnp.std(stat_polarimetry * stat_intensities, axis=1),\n", "]\n", "stat_alpha_times_I_mean = jnp.array(stat_alpha_times_I_mean)\n", "stat_alpha_times_I_std = jnp.array(stat_alpha_times_I_std)\n", "\n", "stat_intensity_mean = jnp.mean(stat_intensities, axis=0)\n", "stat_intensity_std = jnp.std(stat_intensities, axis=0)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Distributions" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Define grid for plotting" }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "def interpolate_to_grid(values: np.ndarray, method: str = \"linear\"):\n", " return griddata(POINTS, values, (X, Y))\n", "\n", "\n", "resolution = 200\n", "POINTS = np.transpose(\n", " [\n", " phsp_sample[\"sigma1\"],\n", " phsp_sample[\"sigma2\"],\n", " ]\n", ")\n", "grid_sample = generate_meshgrid_sample(nominal_model.decay, resolution)\n", "X = np.array(grid_sample[\"sigma1\"])\n", "Y = np.array(grid_sample[\"sigma2\"])" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Code for indicating Dalitz region" }, "tags": [ "hide-cell", "scroll-input" ] }, "outputs": [], "source": [ "def create_contour(phsp: DataSample) -> jnp.ndarray:\n", " # See also https://compwa-org.rtfd.io/report/017.html\n", " m0, m1, m2, m3, s1, s2 = sp.symbols(\"m(:4) sigma(1:3)\", nonnegative=True)\n", " filter_expr = is_within_phasespace(\n", " s1,\n", " s2,\n", " nominal_model.parameter_defaults[m0],\n", " nominal_model.parameter_defaults[m1],\n", " nominal_model.parameter_defaults[m2],\n", " nominal_model.parameter_defaults[m3],\n", " outside_value=0,\n", " ).doit()\n", " filter_func = create_function(filter_expr, backend=\"jax\")\n", " return filter_func(contour_sample) # noqa: F821\n", "\n", "\n", "def draw_dalitz_contour(\n", " ax, color: str = \"black\", width: float = 0.1, **kwargs\n", ") -> None:\n", " ax.contour(\n", " X_CONTOUR,\n", " Y_CONTOUR,\n", " Z_CONTOUR,\n", " colors=color,\n", " linewidths=width,\n", " **kwargs,\n", " )\n", "\n", "\n", "contour_sample = generate_meshgrid_sample(nominal_model.decay, resolution=500)\n", "X_CONTOUR = np.array(contour_sample[\"sigma1\"])\n", "Y_CONTOUR = np.array(contour_sample[\"sigma2\"])\n", "Z_CONTOUR = create_contour(contour_sample)\n", "del 
contour_sample, create_contour" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "full-width", "scroll-input" ] }, "outputs": [], "source": [ "%config InlineBackend.figure_formats = ['png']\n", "plt.rcdefaults()\n", "use_mpl_latex_fonts()\n", "plt.rc(\"font\", size=18)\n", "fig, axes = plt.subplots(\n", " dpi=200,\n", " figsize=(16.7, 8),\n", " gridspec_kw={\"width_ratios\": [1, 1, 1, 1.18]},\n", " ncols=4,\n", " nrows=2,\n", " sharex=True,\n", " sharey=True,\n", ")\n", "plt.subplots_adjust(hspace=0.02, wspace=0.02)\n", "fig.suptitle(R\"Polarimetry field $\\vec\\alpha$ (statistical \\& systematic)\")\n", "s1_label = R\"$m^2\\left(K^-\\pi^+\\right)$ [GeV$^2$]\"\n", "s2_label = R\"$m^2\\left(pK^-\\right)$ [GeV$^2$]\"\n", "s3_label = R\"$m^2\\left(p\\pi^+\\right)$ [GeV$^2$]\"\n", "axes[0, 0].set_ylabel(s2_label)\n", "axes[1, 0].set_ylabel(s2_label)\n", "\n", "global_max_std = max(map(jnp.nanmax, stat_alpha_std))\n", "for i in range(4):\n", " if i != 0:\n", " title = Rf\"$\\alpha_{'xyz'[i-1]}$\"\n", " else:\n", " title = R\"$\\left|\\vec\\alpha\\right|$\"\n", " axes[0, i].set_title(title)\n", "\n", " draw_dalitz_contour(axes[0, i], zorder=10)\n", " Z = interpolate_to_grid(stat_alpha_mean[i])\n", " mesh = axes[0, i].pcolormesh(X, Y, Z, cmap=cm.RdYlGn_r)\n", " mesh.set_clim(vmin=-1, vmax=+1)\n", " if axes[0, i] is axes[0, -1]:\n", " c_bar = fig.colorbar(mesh, ax=axes[0, i], pad=0.01)\n", " c_bar.ax.set_ylabel(Rf\"$\\alpha$ averaged with {n_bootstraps} bootstraps\")\n", " c_bar.ax.set_yticks([-1, 0, +1])\n", " c_bar.ax.set_yticklabels([\"-1\", \"0\", \"+1\"])\n", "\n", " draw_dalitz_contour(axes[1, i], zorder=10)\n", " Z = interpolate_to_grid(stat_alpha_std[i])\n", " mesh = axes[1, i].pcolormesh(X, Y, Z)\n", " mesh.set_clim(vmin=0, vmax=global_max_std)\n", " axes[1, i].set_xlabel(s1_label)\n", " if axes[1, i] is axes[1, -1]:\n", " c_bar = fig.colorbar(mesh, ax=axes[1, i], pad=0.01)\n", " c_bar.ax.set_ylabel(\"standard deviation\")\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "full-width", "scroll-input" ] }, "outputs": [], "source": [ "%config InlineBackend.figure_formats = ['png']\n", "fig, axes = plt.subplots(\n", " dpi=200,\n", " figsize=(16.7, 8),\n", " gridspec_kw={\"width_ratios\": [1, 1, 1, 1.18]},\n", " ncols=4,\n", " nrows=2,\n", " sharex=True,\n", " sharey=True,\n", ")\n", "plt.subplots_adjust(hspace=0.02, wspace=0.02)\n", "fig.suptitle(R\"$\\vec\\alpha \\cdot I$ distributions (statistical \\& systematic)\")\n", "axes[0, 0].set_ylabel(s2_label)\n", "axes[1, 0].set_ylabel(s2_label)\n", "\n", "global_max_mean = jnp.nanmax(jnp.abs(stat_alpha_times_I_mean))\n", "global_max_std = jnp.nanmax(stat_alpha_times_I_std / stat_intensity_mean)\n", "for i in range(4):\n", " if i != 0:\n", " title = Rf\"$\\alpha_{'xyz'[i-1]}$\"\n", " else:\n", " title = R\"$\\left|\\vec\\alpha\\right|$\"\n", " axes[0, i].set_title(title)\n", " axes[1, i].set_xlabel(s1_label)\n", "\n", " draw_dalitz_contour(axes[0, i], zorder=10)\n", " Z = interpolate_to_grid(stat_alpha_times_I_mean[i])\n", " mesh = axes[0, i].pcolormesh(X, Y, Z, cmap=cm.RdYlGn_r)\n", " mesh.set_clim(vmin=-global_max_mean, vmax=+global_max_mean)\n", " if axes[0, i] is axes[0, -1]:\n", " c_bar = fig.colorbar(mesh, ax=axes[0, i], pad=0.01)\n", " c_bar.ax.set_ylabel(\n", " Rf\"$\\alpha \\cdot I$ averaged with {n_bootstraps} bootstraps\"\n", " )\n", "\n", " 
draw_dalitz_contour(axes[1, i], zorder=10)\n", " Z = interpolate_to_grid(stat_alpha_times_I_std[i] / stat_intensity_mean)\n", " mesh = axes[1, i].pcolormesh(X, Y, Z)\n", " mesh.set_clim(vmin=0, vmax=global_max_std)\n", " if axes[1, i] is axes[1, -1]:\n", " c_bar = fig.colorbar(mesh, ax=axes[1, i], pad=0.01)\n", " c_bar.ax.set_ylabel(\"standard deviation / intensity\")\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "full-width" ] }, "outputs": [], "source": [ "%config InlineBackend.figure_formats = ['png']\n", "fig, (ax1, ax2) = plt.subplots(\n", " dpi=200,\n", " figsize=(12, 6.2),\n", " ncols=2,\n", " sharey=True,\n", ")\n", "fig.suptitle(R\"Intensity distribution (statistical \\& systematic)\", y=0.95)\n", "ax1.set_xlabel(s1_label)\n", "ax2.set_xlabel(s1_label)\n", "ax1.set_ylabel(s2_label)\n", "\n", "Z = interpolate_to_grid(stat_intensity_mean)\n", "mesh = ax1.pcolormesh(X, Y, Z, cmap=cm.Reds)\n", "fig.colorbar(mesh, ax=ax1, pad=0.01)\n", "draw_dalitz_contour(ax1, width=0.2)\n", "ax1.set_title(f\"average of {n_bootstraps} bootstraps\")\n", "\n", "Z = interpolate_to_grid(stat_intensity_std / stat_intensity_mean)\n", "mesh = ax2.pcolormesh(X, Y, Z)\n", "fig.colorbar(mesh, ax=ax2, pad=0.01)\n", "draw_dalitz_contour(ax2, width=0.2)\n", "ax2.set_title(\"standard deviation / intensity\")\n", "fig.tight_layout()\n", "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Comparison with nominal values" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "mystnb": { "code_prompt_show": "Compute nominal values" }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "for func in nominal_functions.values():\n", " func.update_parameters(nominal_parameters)\n", "nominal_intensity = nominal_functions[\"intensity\"](phsp_sample)\n", "nominal_polarimetry = jnp.array(\n", " [nominal_functions[f\"alpha_{i}\"](phsp_sample).real for i in \"xyz\"]\n", ")\n", "nominal_polarimetry_norm = jnp.sqrt(jnp.sum(nominal_polarimetry**2, axis=0))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "full-width", "hide-input", "scroll-input" ] }, "outputs": [], "source": [ "%config InlineBackend.figure_formats = ['png']\n", "fig, axes = plt.subplots(\n", " dpi=200,\n", " figsize=(17.3, 4),\n", " gridspec_kw={\"width_ratios\": [1, 1, 1, 1.2]},\n", " ncols=4,\n", " sharey=True,\n", ")\n", "plt.subplots_adjust(hspace=0.2, wspace=0.05)\n", "fig.suptitle(\"Comparison with nominal values\", y=1.04)\n", "axes[0].set_ylabel(s2_label)\n", "for ax in axes:\n", " ax.set_xlabel(s1_label)\n", "\n", "vmax = 5.0 # %\n", "for i in range(4):\n", " if i != 0:\n", " title = Rf\"$\\alpha_{'xyz'[i-1]}$\"\n", " z_values = 100 * jnp.abs(\n", " (stat_alpha_mean[i] - nominal_polarimetry[i - 1])\n", " / nominal_polarimetry[i - 1]\n", " )\n", " else:\n", " title = \"$I$\"\n", " z_values = 100 * jnp.abs(\n", " (stat_intensity_mean - nominal_intensity) / nominal_intensity\n", " )\n", " axes[i].set_title(title)\n", "\n", " draw_dalitz_contour(axes[i], zorder=10)\n", " Z = interpolate_to_grid(z_values)\n", " mesh = axes[i].pcolormesh(X, Y, Z, cmap=cm.Reds)\n", " mesh.set_clim(vmin=0, vmax=vmax)\n", " if axes[i] is axes[-1]:\n", " c_bar = fig.colorbar(mesh, ax=axes[i], pad=0.02)\n", " c_bar.ax.set_ylabel(R\"difference with nominal value (\\%)\")\n", "\n", "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Systematic uncertainties" ]
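}, { "cell_type": "markdown", "metadata": {}, "source": [ "The model uncertainties are estimated by evaluating each alternative model over the same phase space sample, with its own imported parameter values, and comparing the result with the default amplitude model. For the intensity and polarimetry distributions, the figures below show the maximum absolute deviation from the default model, which we write here as\n", "\n", "$$\n", "\\delta X_\\mathrm{model}(\\tau) = \\max_i \\left|X_i(\\tau) - X_0(\\tau)\\right|,\n", "$$\n", "\n", "where $X_0$ denotes the intensity or a polarimetry component computed with the default model, $X_i$ the same quantity computed with the $i$-th alternative model, and $\\tau$ a point in phase space. For the decay rates, the minimum and maximum deviations from the default model are listed separately." ]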
}, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-cell" ] }, "outputs": [], "source": [ "syst_grids = defaultdict(list)\n", "syst_decay_rates = defaultdict(list)\n", "progress_bar = tqdm(desc=\"Computing systematics\", disable=NO_TQDM, total=len(models))\n", "for title, model in models.items():\n", " progress_bar.set_postfix_str(title)\n", " for key, func in jax_functions[title].items():\n", " func.update_parameters(original_parameters[title])\n", " syst_grids[key].append(func(phsp_sample).real)\n", " I_tot = integrate_intensity(syst_grids[\"intensity\"][-1])\n", " intensity_func = jax_functions[title][\"intensity\"]\n", " for resonance in resonances:\n", " res_filter = resonance.name.replace(\"(\", R\"\\(\").replace(\")\", R\"\\)\")\n", " I_sub = sub_intensity(intensity_func, phsp_sample, [res_filter])\n", " syst_decay_rates[resonance.name].append(I_sub / I_tot)\n", " progress_bar.update()\n", "progress_bar.set_postfix_str(\"\")\n", "progress_bar.close()\n", "\n", "syst_intensities = jnp.array(syst_grids[\"intensity\"])\n", "syst_polarimetry = jnp.array([syst_grids[f\"alpha_{i}\"] for i in \"xyz\"])\n", "syst_polarimetry_norm = jnp.sqrt(jnp.sum(syst_polarimetry**2, axis=0))\n", "syst_decay_rates = {k: jnp.array(v) for k, v in syst_decay_rates.items()}" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Mean and standard deviations" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "tags": [] }, "outputs": [], "source": [ "n_models = len(models)\n", "assert syst_intensities.shape == (n_models, n_events)\n", "assert syst_polarimetry.shape == (3, n_models, n_events)\n", "assert syst_polarimetry_norm.shape == (n_models, n_events)\n", "n_models, n_events" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "remove-cell" ] }, "outputs": [], "source": [ "model_selection = [0, 1, 7, 8]\n", "event_selection = slice(0, 3)\n", "np.testing.assert_almost_equal(\n", " np.array(syst_intensities[model_selection, event_selection]),\n", " [\n", " [2158.167525, 5447.080914, 6232.405023],\n", " [2423.450221, 6139.150418, 6986.220964],\n", " [2336.875277, 6342.282093, 6207.238291],\n", " [2419.41275, 6035.253491, 6996.560684],\n", " ],\n", " decimal=6,\n", ")\n", "np.testing.assert_almost_equal(\n", " np.array(syst_polarimetry_norm[model_selection, 0]),\n", " [0.720545, 0.719338, 0.634872, 0.733424],\n", " decimal=6,\n", ")\n", "del model_selection, event_selection" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Compute statistical measures (mean, std, etc.)" }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "syst_alpha_mean = [\n", " jnp.mean(syst_polarimetry_norm, axis=0),\n", " *jnp.mean(syst_polarimetry, axis=1),\n", "]\n", "alpha_diff_with_model_0 = [\n", " syst_polarimetry_norm - syst_polarimetry_norm[0],\n", " *(syst_polarimetry - syst_polarimetry[:, None, 0]),\n", "]\n", "\n", "syst_alpha_mean = jnp.array(syst_alpha_mean)\n", "alpha_diff_with_model_0 = jnp.array(alpha_diff_with_model_0)\n", "\n", "assert alpha_diff_with_model_0.shape == (4, n_models, n_events)\n", "assert jnp.nanmax(alpha_diff_with_model_0[:, 0]) == 0.0\n", "alpha_syst_extrema = jnp.abs(alpha_diff_with_model_0).max(axis=1)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "syst_polarimetry_times_I = [\n", " syst_polarimetry_norm * 
syst_intensities,\n", " *(syst_polarimetry * syst_intensities),\n", "]\n", "syst_polarimetry_times_I = jnp.array(syst_polarimetry_times_I)\n", "\n", "\n", "syst_alpha_times_I_mean = syst_polarimetry_times_I.mean(axis=1)\n", "syst_alpha_times_I_diff = (\n", " syst_polarimetry_times_I - syst_polarimetry_times_I[:, None, 0]\n", ")\n", "assert syst_alpha_times_I_diff.shape == (4, n_models, n_events)\n", "assert jnp.nanmax(syst_alpha_times_I_diff[:, 0]) == 0.0\n", "syst_alpha_times_I_extrema = jnp.abs(syst_alpha_times_I_diff).max(axis=1)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "intensity_diff_with_model_0 = syst_intensities - syst_intensities[0]\n", "intensity_extrema = jnp.nanmax(intensity_diff_with_model_0, axis=0)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "### Distributions" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Show 1D projections of each model" }, "tags": [ "scroll-input", "hide-cell" ] }, "outputs": [], "source": [ "def plot_intensity_distributions(sigma: int) -> None:\n", " original_font_size = plt.rcParams[\"font.size\"]\n", " use_mpl_latex_fonts()\n", " plt.rc(\"font\", size=10)\n", " n_subplots = n_models - 1\n", " nrows = int(np.floor(np.sqrt(n_subplots)))\n", " ncols = int(np.ceil(n_subplots / nrows))\n", " fig, axes = plt.subplots(\n", " figsize=2.0 * np.array([ncols, nrows]),\n", " dpi=200,\n", " ncols=ncols,\n", " nrows=nrows,\n", " sharex=True,\n", " sharey=True,\n", " )\n", " fig.subplots_adjust(hspace=0, wspace=0, bottom=0.1, left=0.06)\n", " x_label = {1: s1_label, 2: s2_label, 3: s3_label}[sigma]\n", " fig.text(0.5, 0.04, x_label, ha=\"center\")\n", " fig.text(0.04, 0.5, \"$I$ (normalized)\", va=\"center\", rotation=\"vertical\")\n", " for ax in axes.flatten()[n_subplots:]:\n", " fig.delaxes(ax)\n", " n_bins = 80\n", " nominal_bin_values, bin_edges = jnp.histogram(\n", " phsp_sample[f\"sigma{sigma}\"],\n", " weights=syst_intensities[0],\n", " bins=n_bins,\n", " density=True,\n", " )\n", " for i, (ax, intensities) in enumerate(\n", " zip(axes.flatten(), syst_intensities[1:]), 1\n", " ):\n", " ax.set_title(f\"Model {i}\", y=0.01)\n", " ax.set_yticks([])\n", " bin_values, _ = jnp.histogram(\n", " phsp_sample[f\"sigma{sigma}\"],\n", " weights=intensities,\n", " bins=n_bins,\n", " density=True,\n", " )\n", " ax.fill_between(\n", " bin_edges[:-1],\n", " bin_values,\n", " alpha=0.5,\n", " step=\"pre\",\n", " )\n", " ax.step(\n", " x=bin_edges[:-1],\n", " y=nominal_bin_values,\n", " linewidth=0.3,\n", " color=\"red\",\n", " )\n", " fig.savefig(f\"_images/intensity-distributions-sigma{sigma}.svg\")\n", " plt.show()\n", " use_mpl_latex_fonts()\n", " plt.rc(\"font\", size=original_font_size)\n", "\n", "\n", "%config InlineBackend.figure_formats = ['svg']\n", "plot_intensity_distributions(1)\n", "plot_intensity_distributions(2)\n", "plot_intensity_distributions(3)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ ":::{only} latex\n", "![](_images/intensity-distributions-sigma1.svg)\n", "![](_images/intensity-distributions-sigma2.svg)\n", "![](_images/intensity-distributions-sigma3.svg)\n", ":::" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "full-width", "scroll-input" ] }, "outputs": [], "source": [ "%config InlineBackend.figure_formats = ['png']\n", "fig, axes = plt.subplots(\n", " dpi=200,\n", " figsize=(16.7, 8),\n", 
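" # wider last column compensates for the space taken by the color bar\n",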
" gridspec_kw={\"width_ratios\": [1, 1, 1, 1.18]},\n", " ncols=4,\n", " nrows=2,\n", " sharex=True,\n", " sharey=True,\n", ")\n", "plt.subplots_adjust(hspace=0.02, wspace=0.02)\n", "fig.suptitle(R\"Polarimetry field $\\vec\\alpha$ (model)\")\n", "axes[0, 0].set_ylabel(s2_label)\n", "axes[1, 0].set_ylabel(s2_label)\n", "\n", "global_max_std = jnp.nanmax(alpha_syst_extrema)\n", "for i in range(4):\n", " if i != 0:\n", " title = Rf\"$\\alpha_{'xyz'[i-1]}$\"\n", " else:\n", " title = R\"$\\left|\\vec\\alpha\\right|$\"\n", " axes[0, i].set_title(title)\n", "\n", " draw_dalitz_contour(axes[0, i], zorder=10)\n", " Z = interpolate_to_grid(syst_alpha_mean[i])\n", " mesh = axes[0, i].pcolormesh(X, Y, Z, cmap=cm.RdYlGn_r)\n", " mesh.set_clim(vmin=-1, vmax=+1)\n", " if axes[0, i] is axes[0, -1]:\n", " c_bar = fig.colorbar(mesh, ax=axes[0, i], pad=0.01)\n", " c_bar.ax.set_ylabel(Rf\"$\\alpha$ averaged with {n_models} models\")\n", " c_bar.ax.set_yticks([-1, 0, +1])\n", " c_bar.ax.set_yticklabels([\"-1\", \"0\", \"+1\"])\n", "\n", " draw_dalitz_contour(axes[1, i], zorder=10)\n", " Z = interpolate_to_grid(alpha_syst_extrema[i])\n", " mesh = axes[1, i].pcolormesh(X, Y, Z)\n", " mesh.set_clim(vmin=0, vmax=global_max_std)\n", " axes[1, i].set_xlabel(s1_label)\n", " if axes[1, i] is axes[1, -1]:\n", " c_bar = fig.colorbar(mesh, ax=axes[1, i], pad=0.01)\n", " c_bar.ax.set_ylabel(\"maximum absolute difference\\nwith the default model\")\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "full-width", "scroll-input" ] }, "outputs": [], "source": [ "%config InlineBackend.figure_formats = ['png']\n", "fig, axes = plt.subplots(\n", " dpi=200,\n", " figsize=(16.7, 8),\n", " gridspec_kw={\"width_ratios\": [1, 1, 1, 1.18]},\n", " ncols=4,\n", " nrows=2,\n", " sharex=True,\n", " sharey=True,\n", ")\n", "plt.subplots_adjust(hspace=0.02, wspace=0.02)\n", "axes[0, 0].set_ylabel(s2_label)\n", "axes[1, 0].set_ylabel(s2_label)\n", "\n", "syst_intensity_mean = jnp.mean(syst_intensities, axis=0)\n", "global_max_mean = jnp.nanmax(jnp.abs(syst_alpha_times_I_mean))\n", "global_max_diff = jnp.nanmax(syst_alpha_times_I_extrema / syst_intensity_mean)\n", "for i in range(4):\n", " if i != 0:\n", " title = Rf\"$\\alpha_{'xyz'[i-1]}$\"\n", " else:\n", " title = R\"$\\left|\\vec\\alpha\\right|$\"\n", " axes[0, i].set_title(title)\n", " axes[1, i].set_xlabel(s1_label)\n", "\n", " draw_dalitz_contour(axes[0, i], zorder=10)\n", " Z = interpolate_to_grid(syst_alpha_times_I_mean[i])\n", " mesh = axes[0, i].pcolormesh(X, Y, Z, cmap=cm.RdYlGn_r)\n", " mesh.set_clim(vmin=-global_max_mean, vmax=+global_max_mean)\n", " if axes[0, i] is axes[0, -1]:\n", " c_bar = fig.colorbar(mesh, ax=axes[0, i], pad=0.01)\n", " c_bar.ax.set_ylabel(Rf\"$\\alpha \\cdot I$ averaged with {n_models} models\")\n", "\n", " draw_dalitz_contour(axes[1, i], zorder=10)\n", " Z = interpolate_to_grid(syst_alpha_times_I_extrema[i] / syst_intensity_mean)\n", " mesh = axes[1, i].pcolormesh(X, Y, Z)\n", " mesh.set_clim(vmin=0, vmax=global_max_diff)\n", " if axes[1, i] is axes[1, -1]:\n", " c_bar = fig.colorbar(mesh, ax=axes[1, i], pad=0.01)\n", " c_bar.ax.set_ylabel(\n", " \"maximum absolute difference\\n\"\n", " \"with the default model\\n\"\n", " \"divided by nominal intensity\"\n", " )\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "full-width" ] }, "outputs": [], "source": [ "%config 
InlineBackend.figure_formats = ['png']\n", "fig, (ax1, ax2) = plt.subplots(\n", " dpi=200,\n", " figsize=(12, 6.9),\n", " ncols=2,\n", " sharey=True,\n", ")\n", "fig.suptitle(\"Intensity distribution (model)\", y=0.95)\n", "ax1.set_xlabel(s1_label)\n", "ax2.set_xlabel(s1_label)\n", "ax1.set_ylabel(s2_label)\n", "\n", "Z = interpolate_to_grid(syst_intensity_mean)\n", "mesh = ax1.pcolormesh(X, Y, Z, cmap=cm.Reds)\n", "fig.colorbar(mesh, ax=ax1, pad=0.01)\n", "draw_dalitz_contour(ax1, width=0.2)\n", "ax1.set_title(f\"average of {n_models} models\")\n", "\n", "Z = interpolate_to_grid(intensity_extrema / syst_intensity_mean)\n", "mesh = ax2.pcolormesh(X, Y, Z)\n", "fig.colorbar(mesh, ax=ax2, pad=0.01)\n", "draw_dalitz_contour(ax2, width=0.2)\n", "ax2.set_title(\"maximum absolute difference\\n\" R\"with the default model\")\n", "fig.tight_layout()\n", "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Uncertainty on polarimetry\n", "\n", "For each bootstrap or alternative model $i$, we compute the angle between the aligned polarimeter vector $\\vec\\alpha_i$ and the one from the nominal model, $\\vec\\alpha_0$:\n", "\n", "$$\n", "\\cos\\theta_i = \\frac{\\vec \\alpha_i \\cdot \\vec \\alpha_0}{|\\vec\\alpha_i||\\vec\\alpha_0|}.\n", "$$\n", "\n", "The solid angle can then be computed as:\n", "\n", "$$\n", "\\delta\\Omega = \\int_0^{2\\pi}\\mathrm{d}\\phi \\int_0^{\\theta} \\sin\\theta' \\, \\mathrm{d}\\theta' = 2\\pi\\left(1-\\cos\\theta\\right).\n", "$$\n", "\n", "The statistical uncertainty is obtained by taking the standard deviation of the $\\delta\\Omega$ distribution, and the systematic uncertainty by finding $\\theta_\\mathrm{max} = \\max_i \\theta_i$ and computing $\\delta\\Omega_\\mathrm{max}$ from it." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "full-width", "scroll-input" ] }, "outputs": [], "source": [ "def dot(array1: jnp.ndarray, array2: jnp.ndarray, axis=0) -> jnp.ndarray:\n", " return jnp.sum(array1 * array2, axis=axis)\n", "\n", "\n", "def norm(array: jnp.ndarray, axis=0) -> jnp.ndarray:\n", " return jnp.sqrt(dot(array, array, axis=axis))\n", "\n", "\n", "def compute_theta(polarimetry: jnp.ndarray) -> jnp.ndarray:\n", " return jnp.arccos(\n", " dot(polarimetry, nominal_polarimetry[:, None])\n", " / (norm(polarimetry) * norm(nominal_polarimetry))\n", " )\n", "\n", "\n", "def compute_solid_angle(theta: jnp.ndarray) -> jnp.ndarray:\n", " return 2 * jnp.pi * (1 - jnp.cos(theta))\n", "\n", "\n", "stat_theta = compute_theta(stat_polarimetry)\n", "syst_theta = compute_theta(syst_polarimetry)\n", "assert stat_theta.shape == (n_bootstraps, n_events)\n", "assert syst_theta.shape == (n_models, n_events)\n", "\n", "stat_solid_angle = jnp.nanstd(compute_solid_angle(stat_theta), axis=0)\n", "syst_solid_angle = jnp.nanmax(compute_solid_angle(syst_theta), axis=0)\n", "\n", "\n", "def plot_angle_uncertainties(watermark: bool, titles: bool) -> None:\n", " plt.ioff()\n", " fig, axes = plt.subplots(\n", " dpi=200,\n", " figsize=(11, 4),\n", " ncols=2,\n", " )\n", " fig.subplots_adjust(wspace=0.3)\n", " ax1, ax2 = axes\n", " if titles:\n", " fig.suptitle(R\"Uncertainty over $\\vec\\alpha$ polar angle\", y=1.04)\n", " ax1.set_title(R\"Statistical \\& systematic\")\n", " ax2.set_title(\"Model\")\n", " for ax in axes:\n", " ax.set_box_aspect(1)\n", " ax.set_xlabel(s1_label)\n", " ax.set_ylabel(s2_label)\n", " draw_dalitz_contour(ax, width=0.2, zorder=10)\n", "\n", " Z = 
interpolate_to_grid(stat_solid_angle)\n", " mesh = ax1.pcolormesh(X, Y, Z, cmap=plt.cm.YlOrRd)\n", " mesh.set_clim(0, 4 * np.pi)\n", " c_bar = fig.colorbar(mesh, ax=ax1, pad=0.02)\n", " c_bar.ax.set_ylabel(R\"Std. of $\\delta\\Omega$\")\n", " c_bar.ax.set_yticks([0, 2 * np.pi, 4 * np.pi])\n", " c_bar.ax.set_yticklabels([\"$0$\", R\"$2\\pi$\", R\"$4\\pi$\"])\n", "\n", " Z = interpolate_to_grid(syst_solid_angle)\n", " mesh = ax2.pcolormesh(X, Y, Z, cmap=plt.cm.YlOrRd)\n", " mesh.set_clim(0, 4 * np.pi)\n", " c_bar = fig.colorbar(mesh, ax=ax2, pad=0.02)\n", " c_bar.ax.set_ylabel(R\"Max. of $\\delta\\Omega$\")\n", " c_bar.ax.set_yticks([0, 2 * np.pi, 4 * np.pi])\n", " c_bar.ax.set_yticklabels([\"$0$\", R\"$2\\pi$\", R\"$4\\pi$\"])\n", " output_file = \"polarimetry-field-angle-uncertainties\"\n", " if watermark:\n", " output_file += \"-watermark\"\n", " add_watermark_to_uncertainties(ax1)\n", " add_watermark_to_uncertainties(ax2)\n", " if titles:\n", " output_file += \"-with-titles\"\n", " fig.savefig(f\"_static/images/{output_file}.png\", bbox_inches=\"tight\")\n", " if watermark and titles:\n", " plt.show()\n", " plt.close(fig)\n", " plt.ion()\n", "\n", "\n", "def add_watermark_to_uncertainties(ax) -> None:\n", " add_watermark(ax, 0.70, 0.82, fontsize=16)\n", "\n", "\n", "%config InlineBackend.figure_formats = ['png']\n", "plt.rcdefaults()\n", "use_mpl_latex_fonts()\n", "plt.rc(\"font\", size=16)\n", "\n", "boolean_combinations = list(itertools.product(*2 * [[False, True]]))\n", "for watermark, titles in tqdm(boolean_combinations, disable=NO_TQDM):\n", " plot_angle_uncertainties(watermark, titles)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "full-width", "scroll-input" ] }, "outputs": [], "source": [ "stat_alpha_difference = norm(stat_polarimetry - nominal_polarimetry[:, None])\n", "syst_alpha_difference = norm(syst_polarimetry - nominal_polarimetry[:, None])\n", "nominal_polarimetry_norm = norm(nominal_polarimetry[:, None])\n", "stat_alpha_rel_difference = 100 * stat_alpha_difference / nominal_polarimetry_norm\n", "syst_alpha_rel_difference = 100 * syst_alpha_difference / nominal_polarimetry_norm\n", "assert stat_alpha_rel_difference.shape == (n_bootstraps, n_events)\n", "assert syst_alpha_rel_difference.shape == (n_models, n_events)\n", "\n", "\n", "def create_figure(titles: bool):\n", " fig, axes = plt.subplots(\n", " dpi=200,\n", " figsize=(11.5, 4),\n", " ncols=2,\n", " )\n", " fig.subplots_adjust(wspace=0.4)\n", " ax1, ax2 = axes\n", " if titles:\n", " fig.suptitle(R\"Uncertainty over $\\left|\\vec\\alpha\\right|$\", y=1.04)\n", " ax1.set_title(R\"Statistical \\& systematic\")\n", " ax2.set_title(\"Model\")\n", " for ax in axes:\n", " ax.set_box_aspect(1)\n", " ax.set_xlabel(s1_label)\n", " ax.set_ylabel(s2_label)\n", " draw_dalitz_contour(ax, width=0.2)\n", " return fig, (ax1, ax2)\n", "\n", "\n", "def plot_norm_rel_uncertainties(watermark: bool, titles: bool) -> None:\n", " plt.ioff()\n", " fig, (ax1, ax2) = create_figure(titles)\n", " Z = interpolate_to_grid(jnp.nanstd(stat_alpha_rel_difference, axis=0))\n", " mesh = ax1.pcolormesh(X, Y, Z, cmap=plt.cm.YlOrRd)\n", " mesh.set_clim(0, 100)\n", " c_bar = fig.colorbar(mesh, ax=ax1, pad=0.02)\n", " c_bar.ax.set_ylabel(\n", " R\"Std. 
of\"\n", " R\" $\\frac{|\\alpha^{(i)}-\\alpha^\\mathrm{default}|}{|\\alpha^\\mathrm{default}|}$\"\n", " )\n", " c_bar.ax.set_yticks([0, 50, 100])\n", " c_bar.ax.set_yticklabels([R\"0\\%\", R\"50\\%\", R\"100\\%\"])\n", " Z = interpolate_to_grid(jnp.nanmax(syst_alpha_rel_difference, axis=0))\n", " mesh = ax2.pcolormesh(X, Y, Z, cmap=plt.cm.YlOrRd)\n", " mesh.set_clim(0, 100)\n", " c_bar = fig.colorbar(mesh, ax=ax2, pad=0.02)\n", " c_bar.ax.set_ylabel(\n", " R\"Max. of\"\n", " R\" $\\frac{|\\alpha^{(i)}-\\alpha^\\mathrm{default}|}{|\\alpha^\\mathrm{default}|}$\"\n", " )\n", " c_bar.ax.set_yticks([0, 50, 100])\n", " c_bar.ax.set_yticklabels([R\"0\\%\", R\"50\\%\", R\"100\\%\"])\n", " output_file = \"polarimetry-field-norm-uncertainties-relative\"\n", " if watermark:\n", " output_file += \"-watermark\"\n", " add_watermark_to_uncertainties(ax1)\n", " add_watermark_to_uncertainties(ax2)\n", " if titles:\n", " output_file += \"-with-titles\"\n", " fig.savefig(f\"_static/images/{output_file}.png\", bbox_inches=\"tight\")\n", " if watermark and titles:\n", " plt.show()\n", " plt.close(fig)\n", " plt.ion()\n", "\n", "\n", "def plot_norm_uncertainties(watermark: bool, titles: bool) -> None:\n", " plt.ioff()\n", " vmax = 0.5\n", " fig, (ax1, ax2) = create_figure(titles)\n", " Z = interpolate_to_grid(jnp.nanstd(stat_alpha_difference, axis=0))\n", " mesh = ax1.pcolormesh(X, Y, Z, cmap=plt.cm.YlOrRd)\n", " c_bar = fig.colorbar(mesh, ax=ax1, pad=0.02)\n", " mesh.set_clim(0, vmax)\n", " c_bar.ax.set_ylabel(R\"Std. of $|\\alpha^{(i)}-\\alpha^\\mathrm{default}|$\")\n", " Z = interpolate_to_grid(jnp.nanmax(syst_alpha_difference, axis=0))\n", " mesh = ax2.pcolormesh(X, Y, Z, cmap=plt.cm.YlOrRd)\n", " mesh.set_clim(0, vmax)\n", " c_bar = fig.colorbar(mesh, ax=ax2, pad=0.02)\n", " c_bar.ax.set_ylabel(R\"Max. 
of $|\\alpha^{(i)}-\\alpha^\\mathrm{default}|$\")\n", " output_file = \"polarimetry-field-norm-uncertainties\"\n", " if watermark:\n", " output_file += \"-watermark\"\n", " add_watermark_to_uncertainties(ax1)\n", " add_watermark_to_uncertainties(ax2)\n", " if titles:\n", " output_file += \"-with-titles\"\n", " fig.savefig(f\"_static/images/{output_file}.png\", bbox_inches=\"tight\")\n", " if watermark and titles:\n", " plt.show()\n", " plt.close(fig)\n", " plt.ion()\n", "\n", "\n", "%config InlineBackend.figure_formats = ['png']\n", "plt.rcdefaults()\n", "use_mpl_latex_fonts()\n", "plt.rc(\"font\", size=18)\n", "\n", "for watermark, titles in tqdm(boolean_combinations, disable=NO_TQDM):\n", " plot_norm_rel_uncertainties(watermark, titles)\n", " plot_norm_uncertainties(watermark, titles)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Decay rates" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "remove-cell" ] }, "outputs": [], "source": [ "np.testing.assert_almost_equal(\n", " np.array(stat_decay_rates[\"L(1405)\"][:3]),\n", " [0.0799202, 0.0809439, 0.0789926],\n", ")\n", "np.testing.assert_almost_equal(\n", " np.array(syst_decay_rates[\"L(1405)\"]),\n", " [\n", " 0.0777893,\n", " 0.0788526,\n", " 0.0763691,\n", " 0.0777076,\n", " 0.0744958,\n", " 0.0678732,\n", " 0.1078605,\n", " 0.0524963,\n", " 0.0712318,\n", " 0.0619644,\n", " 0.0735088,\n", " 0.0777114,\n", " 0.0581079,\n", " 0.0766585,\n", " 0.0773498,\n", " 0.0759646,\n", " 0.0784241,\n", " 0.070247,\n", " ],\n", ")" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "with open(\"../data/observable-references.yaml\") as f:\n", " data = yaml.load(f, Loader=yaml.SafeLoader)\n", "lhcb_values = {\n", " k: tuple(float(v) for v in row.split(\" ± \"))\n", " for k, row in data[nominal_model_title][\"rate\"].items()\n", "}\n", "\n", "src = R\"\"\"\n", "\\begin{array}{l|c|c}\n", "\\textbf{Resonance} & \\textbf{Decay rate} & \\textbf{LHCb} \\\\\n", "\\hline\n", "\"\"\"\n", "for resonance in resonances:\n", " ff_statistical = 100 * stat_decay_rates[resonance.name]\n", " ff_systematics = 100 * syst_decay_rates[resonance.name]\n", " ff_nominal = f\"{ff_systematics[0]:.2f}\"\n", " ff_stat = f\"{ff_statistical.std():.2f}\"\n", " ff_syst_min = f\"{(ff_systematics[1:]-ff_systematics[0]).min():+.2f}\"\n", " ff_syst_max = f\"{(ff_systematics[1:]-ff_systematics[0]).max():+.2f}\"\n", " src += f\"{resonance.latex} & \"\n", " src += Rf\"{ff_nominal} \\pm {ff_stat}_{{{ff_syst_min}}}^{{{ff_syst_max}}} & \"\n", " lhcb_value, lhcb_stat, lhcb_syst, _ = lhcb_values[resonance.name]\n", " src += Rf\"{lhcb_value} \\pm {lhcb_stat} \\pm {lhcb_syst} \\\\\" \"\\n\"\n", "src += R\"\\end{array}\"\n", "Latex(src)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "full-width" ] }, "outputs": [], "source": [ "alignment = \"c\".join(\"\" for _ in range(n_models) if i)\n", "names = \" & \".join(Rf\"\\textbf{{{i}}}\" for i in range(n_models) if i)\n", "src = Rf\"\"\"\n", "$$\n", "\\begin{{array}}{{l|{alignment}}}\n", " \\textbf{{Resonance}} & {names} \\\\\n", " \\hline\n", "\"\"\"\n", "for resonance in resonances:\n", " ff_systematics = 100 * syst_decay_rates[resonance.name]\n", " src += f\" {resonance.latex:13s}\"\n", " for ff_model in ff_systematics[1:]:\n", " diff = 
f\"{ff_model-ff_systematics[0]:+.2f}\"\n", " if ff_model == ff_systematics[1:].min():\n", " src += Rf\" & \\color{{blue}}{{{diff}}}\"\n", " elif ff_model == ff_systematics[1:].max():\n", " src += Rf\" & \\color{{red}}{{{diff}}}\"\n", " else:\n", " src += f\" & {diff}\"\n", " src += R\" \\\\\" \"\\n\"\n", "src += \"\"\"\\\n", "\\\\end{array}\n", "$$\n", "\"\"\"\n", "for i, title in enumerate(models):\n", " src += f\"\\n- **{i}**: {title}\"\n", "Markdown(src)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Average polarimetry values\n", "\n", "The components of the **averaged polarimeter vector** $\\overline{\\alpha}$ are defined as:\n", "\n", "$$\n", "\\overline{\\alpha}_j = \\int I_0\\left(\\tau\\right) \\alpha_j\\left(\\tau\\right) \\mathrm{d}^n \\tau \\; \\big / \\int I_0\\left(\\tau\\right)\\,\\mathrm{d}^n \\tau\n", "$$\n", "\n", "The averages of the norm of $\\vec\\alpha$ are computed as follows:\n", "- $\\left|\\overline{\\alpha}\\right| = \\sqrt{\\overline{\\alpha_x}^2+\\overline{\\alpha_y}^2+\\overline{\\alpha_z}^2}$, with the statistical uncertainties added in quadrature and the systematic uncertainties by taking the same formula on the extrema values of each $\\overline{\\alpha_j}$\n", "- $\\overline{\\left|\\alpha\\right|} = \\sqrt{\\int I_0\\left(\\tau\\right) \\left|\\vec\\alpha\\left(\\tau\\right)\\right|^2 \\mathrm{d}^n \\tau \\; \\big / \\int I_0\\left(\\tau\\right)\\,\\mathrm{d}^n \\tau}$" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "scroll-input" ] }, "outputs": [], "source": [ "def compute_weighted_average(v: jnp.ndarray, weights: jnp.ndarray) -> jnp.ndarray:\n", " return jnp.nansum(v * weights, axis=-1) / jnp.nansum(weights, axis=-1)\n", "\n", "\n", "def compute_polar_coordinates(cartesian_vector: jnp.ndarray) -> jnp.ndarray:\n", " x, y, z = cartesian_vector\n", " norm = jnp.sqrt(jnp.sum(cartesian_vector**2, axis=0))\n", " theta = np.arccos(z / norm)\n", " phi = np.pi - np.arctan2(y, -x)\n", " return jnp.array([norm, theta, phi])\n", "\n", "\n", "stat_weighted_alpha_norm = jnp.sqrt(\n", " compute_weighted_average(stat_polarimetry_norm**2, weights=stat_intensities)\n", ")\n", "stat_weighted_alpha = compute_weighted_average(\n", " stat_polarimetry,\n", " weights=stat_intensities,\n", ")\n", "stat_weighted_alpha_polar = compute_polar_coordinates(stat_weighted_alpha)\n", "assert stat_weighted_alpha_norm.shape == (n_bootstraps,)\n", "assert stat_weighted_alpha.shape == (3, n_bootstraps)\n", "assert stat_weighted_alpha_polar.shape == (3, n_bootstraps)\n", "\n", "syst_weighted_alpha_norm = jnp.sqrt(\n", " compute_weighted_average(syst_polarimetry_norm**2, weights=syst_intensities)\n", ")\n", "syst_weighted_alpha = compute_weighted_average(\n", " syst_polarimetry,\n", " weights=syst_intensities,\n", ")\n", "syst_weighted_alpha_polar = compute_polar_coordinates(syst_weighted_alpha)\n", "assert syst_weighted_alpha_norm.shape == (n_models,)\n", "assert syst_weighted_alpha.shape == (3, n_models)\n", "assert syst_weighted_alpha_polar.shape == (3, n_models)\n", "\n", "nominal_weighted_alpha_norm = syst_weighted_alpha_norm[0]\n", "nominal_weighted_alpha = syst_weighted_alpha[:, 0]\n", "nominal_weighted_alpha_polar = syst_weighted_alpha_polar[:, 0]\n", "\n", "syst_weighted_alpha_norm_diff = (\n", " syst_weighted_alpha_norm - nominal_weighted_alpha_norm\n", ")\n", "syst_weighted_alpha_diff = (syst_weighted_alpha.T - nominal_weighted_alpha).T\n", "syst_weighted_alpha_diff_polar = (\n", " 
syst_weighted_alpha_polar.T - nominal_weighted_alpha_polar\n", ").T\n", "\n", "stat_weighted_alpha_std = stat_weighted_alpha.std(axis=1)\n", "syst_weighted_alpha_min = syst_weighted_alpha_diff.min(axis=1)\n", "syst_weighted_alpha_max = syst_weighted_alpha_diff.max(axis=1)\n", "stat_weighted_alpha_polar_std = stat_weighted_alpha_polar.std(axis=1)\n", "syst_weighted_alpha_polar_min = syst_weighted_alpha_diff_polar.min(axis=1)\n", "syst_weighted_alpha_polar_max = syst_weighted_alpha_diff_polar.max(axis=1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Cartesian coordinates**:" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "scroll-input" ] }, "outputs": [], "source": [ "def render_cartesian_uncertainties(\n", " value, stat, syst_min, syst_max, plus: bool = True\n", ") -> str:\n", " value *= 1e3\n", " stat *= 1e3\n", " syst_min *= 1e3\n", " syst_max *= 1e3\n", " if plus:\n", " val = f\"{value:+.1f}\"\n", " else:\n", " val = f\"{value:.1f}\"\n", " stat = f\"{stat:.1f}\"\n", " syst_min = f\"-{abs(syst_min):.1f}\"\n", " syst_max = f\"+{abs(syst_max):.1f}\"\n", " return (\n", " Rf\"\\left({val} \\pm {stat}_{{{syst_min}}}^{{{syst_max}}} \\right) \\times\"\n", " R\" 10^{-3}\"\n", " )\n", "\n", "\n", "src = R\"\"\"\n", "\\begin{array}{ccr}\n", "\"\"\"\n", "for i in range(3):\n", " value = render_cartesian_uncertainties(\n", " nominal_weighted_alpha[i],\n", " stat_weighted_alpha_std[i],\n", " syst_weighted_alpha_min[i],\n", " syst_weighted_alpha_max[i],\n", " )\n", " src += Rf\" \\overline{{\\alpha_{'xyz'[i]}}} & = & {value} \\\\\" \"\\n\"\n", "\n", "value = render_cartesian_uncertainties(\n", " nominal_weighted_alpha_norm,\n", " stat_weighted_alpha_norm.std(),\n", " syst_weighted_alpha_norm_diff.min(),\n", " syst_weighted_alpha_norm_diff.max(),\n", " plus=False,\n", ")\n", "src += Rf\" \\overline{{\\left|\\alpha\\right|}} & = & {value} \\\\\"\n", "src += R\"\\end{array}\"\n", "Latex(src)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "**Polar coordinates**:\n", "\n", "$$\n", "\\begin{array}{lcl}\n", "\\theta\\left(\\vec\\alpha\\right) &=& \\arccos\\left(\\alpha_z \\, \\big/ \\left|\\alpha\\right|\\right) \\\\\n", "\\phi\\left(\\vec\\alpha\\right) &=& \\pi - \\mathrm{atan2}\\left(\\alpha_y, -\\alpha_x\\right)\n", "\\end{array}\n", "$$" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "scroll-input" ] }, "outputs": [], "source": [ "def render_radian_angle_uncertainties(value, stat, syst_min, syst_max) -> str:\n", " val = f\"{value:+.2f}\"\n", " stat = f\"{stat:.2f}\"\n", " syst_min = f\"-{abs(syst_min):.2f}\"\n", " syst_max = f\"+{abs(syst_max):.2f}\"\n", " return Rf\"{val} \\pm {stat}_{{{syst_min}}}^{{{syst_max}}}\\;\\mathrm{{rad}}\"\n", "\n", "\n", "def render_angle_uncertainties(value, stat, syst_min, syst_max) -> str:\n", " value /= np.pi\n", " stat /= np.pi\n", " syst_min /= np.pi\n", " syst_max /= np.pi\n", " val = f\"{value:+.3f}\"\n", " stat = f\"{stat:.3f}\"\n", " syst_min = f\"-{abs(syst_min):.3f}\"\n", " syst_max = f\"+{abs(syst_max):.3f}\"\n", " return (\n", " Rf\"\\left({val} \\pm {stat}_{{{syst_min}}}^{{{syst_max}}} \\right) \\times \\pi\"\n", " )\n", "\n", "\n", "src = R\"\"\"\n", "\\begin{array}{ccl}\n", "\"\"\"\n", "labels = [\n", " R\"\\left|\\overline{\\alpha}\\right|\",\n", " R\"\\theta\\left(\\overline{\\alpha}\\right)\",\n", " R\"\\phi\\left(\\overline{\\alpha}\\right)\",\n", "]\n", "for i, 
label in enumerate(labels):\n", " renderer = (\n", " render_cartesian_uncertainties\n", " if i == 0\n", " else render_radian_angle_uncertainties\n", " )\n", " args = (\n", " nominal_weighted_alpha_polar[i],\n", " stat_weighted_alpha_polar_std[i],\n", " syst_weighted_alpha_polar_min[i],\n", " syst_weighted_alpha_polar_max[i],\n", " )\n", " src += Rf\" {label} & = & {renderer(*args)} \\\\\" \"\\n\"\n", " if i > 0:\n", " src += Rf\" & = & {render_angle_uncertainties(*args)} \\\\\" \"\\n\"\n", "\n", "src += R\"\\end{array}\"\n", "Latex(src)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Averaged polarimeter values for each model (and the difference with the nominal model):" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "src = R\"\"\"\n", "\\begin{array}{r|rrrr|rrrr}\n", " \\textbf{Model}\n", " & \\overline{\\alpha}_x & \\overline{\\alpha}_y & \\overline{\\alpha}_z & \\overline{\\left|\\alpha\\right|}\n", " & \\Delta\\overline{\\alpha}_x & \\Delta\\overline{\\alpha}_y & \\Delta\\overline{\\alpha}_z\n", " & \\Delta\\overline{\\left|\\alpha\\right|} \\\\\n", " \\hline\n", "\"\"\"\n", "for i, title in enumerate(models):\n", " α = 1e3 * syst_weighted_alpha[:, i]\n", " abs_α = 1e3 * syst_weighted_alpha_norm[i]\n", " Δα = 1e3 * syst_weighted_alpha_diff[:, i]\n", " abs_Δα = 1e3 * syst_weighted_alpha_norm_diff[i]\n", " src += Rf\" \\textbf{{{i}}}\"\n", " src += Rf\" & {α[0]:+.1f} & {α[1]:+.1f} & {α[2]:+.1f} & {abs_α:.1f}\"\n", " if i != 0:\n", " src += Rf\" & {Δα[0]:+.1f} & {Δα[1]:+.1f} & {Δα[2]:+.1f} & {abs_Δα:+.1f}\"\n", " src += R\" \\\\\" \"\\n\"\n", " del i, title, α, abs_α, Δα, abs_Δα\n", "src += R\"\\end{array}\"\n", "Latex(src)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "src = R\"\"\"\n", "\\begin{array}{r|rrr|rrr}\n", " \\textbf{Model}\n", " & 10^3 \\cdot \\left|\\overline{\\alpha}\\right|\n", " & \\theta\\left(\\overline{\\alpha}\\right) / \\pi\n", " & \\phi\\left(\\overline{\\alpha}\\right) / \\pi\n", " & 10^3 \\cdot \\Delta\\left|\\overline{\\alpha}\\right|\n", " & \\Delta\\theta\\left(\\overline{\\alpha}\\right) / \\pi\n", " & \\Delta\\phi\\left(\\overline{\\alpha}\\right) / \\pi \\\\\n", " \\hline\n", "\"\"\"\n", "for i, title in enumerate(models):\n", " α, θ, φ = syst_weighted_alpha_polar[:, i]\n", " Δα, Δθ, Δφ = syst_weighted_alpha_diff_polar[:, i]\n", " src += Rf\" \\textbf{{{i}}}\"\n", " src += Rf\" & {1e3*α:+.1f} & {θ/np.pi:+.3f} & {φ/np.pi:+.3f}\"\n", " if i != 0:\n", " src += Rf\" & {1e3*Δα:+.1f} & {Δθ/np.pi:+.3f} & {Δφ/np.pi:+.3f}\"\n", " src += R\" \\\\\" \"\\n\"\n", " del i, title, α, θ, φ, Δα, Δθ, Δφ\n", "src += R\"\\end{array}\"\n", "Latex(src)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ ":::{tip}\n", "These values can be downloaded in serialized JSON format under {ref}`uncertainties:Exported distributions`.\n", ":::" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "full-width", "scroll-input" ] }, "outputs": [], "source": [ "alpha_x_reversed = syst_weighted_alpha[0, ::-1]\n", "alpha_y_reversed = syst_weighted_alpha[1, ::-1]\n", "alpha_z_reversed = syst_weighted_alpha[2, ::-1]\n", "alpha_norm_reversed = syst_weighted_alpha_polar[0, ::-1]\n", "alpha_theta_reversed = syst_weighted_alpha_polar[1, ::-1]\n", "alpha_phi_reversed = 
syst_weighted_alpha_polar[2, ::-1]\n", "marker_options = dict(\n", " color=(n_models - 1) * [\"blue\"] + [\"red\"],\n", " size=(n_models - 1) * [7] + [10],\n", " opacity=0.7,\n", ")\n", "model_titles_reversed = [\n", " \"<br>\".join(wrap(title, width=60)) for title in reversed(models)\n", "]\n", "\n", "fig = make_subplots(cols=2, shared_yaxes=True)\n", "fig.add_trace(\n", " go.Scatter(\n", " x=alpha_phi_reversed,\n", " y=alpha_theta_reversed,\n", " hovertemplate=\"%{text}<br>
\"\n", " + \"ϕ(ɑ̅) = %{x:.3f}, \"\n", " + \"θ(ɑ̅) = %{y:.3f}\"\n", " + \"<extra></extra>\", # hide trace box\n", " mode=\"markers\",\n", " marker=marker_options,\n", " text=model_titles_reversed,\n", " ),\n", " col=1,\n", " row=1,\n", ")\n", "fig.add_trace(\n", " go.Scatter(\n", " x=alpha_norm_reversed,\n", " y=alpha_theta_reversed,\n", " hovertemplate=\"%{text}<br>
\"\n", " + \"|ɑ̅| = %{x:.3f}, \"\n", " + \"θ(ɑ̅) = %{y:.3f}\"\n", " + \"<extra></extra>\", # hide trace box\n", " mode=\"markers\",\n", " marker=marker_options,\n", " text=model_titles_reversed,\n", " ),\n", " col=2,\n", " row=1,\n", ")\n", "fig.update_layout(\n", " height=600,\n", " showlegend=False,\n", " title_text=\"Averaged ɑ̅ systematics distribution (polar)\",\n", " xaxis=dict(\n", " title=\"ϕ(ɑ̅)\",\n", " tickmode=\"array\",\n", " tickvals=np.array([0.9, 0.95, 1, 1.05]) * np.pi,\n", " ticktext=[\"0.9 π\", \"0.95 π\", \"π\", \"1.05 π\"],\n", " ),\n", " yaxis=dict(\n", " title=\"θ(ɑ̅)\",\n", " tickmode=\"array\",\n", " tickvals=np.array([0.90, 0.91, 0.92, 0.93, 0.94, 0.95]) * np.pi,\n", " ticktext=[\"0.90 π\", \"0.91 π\", \"0.92 π\", \"0.93 π\", \"0.94 π\", \"0.95 π\"],\n", " ),\n", " xaxis2=dict(title=\"|ɑ̅|\"),\n", ")\n", "fig.show()\n", "\n", "fig = go.Figure(\n", " data=go.Scatter3d(\n", " x=alpha_x_reversed,\n", " y=alpha_y_reversed,\n", " z=alpha_z_reversed,\n", " hovertemplate=\"%{text}<br>
\"\n", " + \"ɑ̅x = %{x:.3f}, \"\n", " + \"ɑ̅y = %{y:.3f}, \"\n", " + \"ɑ̅z = %{z:.3f}\"\n", " + \"\", # hide trace box\n", " mode=\"markers\",\n", " marker=marker_options,\n", " text=model_titles_reversed,\n", " ),\n", ")\n", "fig.update_layout(\n", " width=700,\n", " height=700,\n", " scene=dict(\n", " aspectmode=\"cube\",\n", " xaxis_title=\"ɑ̅x\",\n", " yaxis_title=\"ɑ̅y\",\n", " zaxis_title=\"ɑ̅z\",\n", " ),\n", " showlegend=False,\n", " title_text=R\"Average ɑ̅ systematics distribution (cartesian)\",\n", ")\n", "fig.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "scroll-input" ] }, "outputs": [], "source": [ "def axis_formatter(decimals: int = 10):\n", " # https://gist.github.com/taxus-d/aa69e43c3d8b804864ede4a8c056e9cd\n", " def formatter(val, pos) -> str:\n", " fraction = np.round(val / np.pi, decimals)\n", " if fraction == 1:\n", " return R\"$\\pi$\"\n", " return Rf\"${fraction:g} \\pi$\"\n", "\n", " return formatter\n", "\n", "\n", "%config InlineBackend.figure_formats = ['svg']\n", "plt.rcdefaults()\n", "use_mpl_latex_fonts()\n", "plt.rc(\"font\", size=15)\n", "fig, (ax1, ax2) = plt.subplots(figsize=(11, 5), ncols=2, sharey=True)\n", "fig.suptitle(R\"$\\vec{\\overline{\\alpha}}$ statistics distribution (polar)\", y=0.95)\n", "fig.subplots_adjust(wspace=0.05)\n", "\n", "norm, theta, phi = nominal_weighted_alpha_polar\n", "ax1.set_xlabel(R\"$\\phi\\left(\\overline{\\alpha}\\right)$\")\n", "ax1.set_ylabel(R\"$\\theta\\left(\\overline{\\alpha}\\right)$\")\n", "ax2.set_xlabel(R\"$\\left|\\overline{\\alpha}\\right|$\")\n", "ax1.xaxis.set_major_locator(MultipleLocator(base=0.05 * np.pi))\n", "ax1.xaxis.set_major_formatter(FuncFormatter(axis_formatter(decimals=2)))\n", "ax1.yaxis.set_major_locator(MultipleLocator(base=0.005 * np.pi))\n", "ax1.yaxis.set_major_formatter(FuncFormatter(axis_formatter(decimals=3)))\n", "ax1.scatter(\n", " x=stat_weighted_alpha_polar[2], # phi\n", " y=stat_weighted_alpha_polar[1], # theta\n", " s=2,\n", ")\n", "ax1.scatter(phi, theta, c=\"red\", marker=\"*\")\n", "ax1.annotate(\n", " Rf\"$\\left({norm:.3f}, {theta/np.pi:+.3f}\\pi, {phi/np.pi:+.3f}\\pi\\right)$\",\n", " xy=(phi, theta),\n", " color=\"red\",\n", " fontsize=12,\n", ")\n", "\n", "ax2.scatter(\n", " x=stat_weighted_alpha_polar[0], # norm\n", " y=stat_weighted_alpha_polar[1], # theta\n", " s=2,\n", " label=\"Bootstraps\",\n", ")\n", "ax2.scatter(norm, theta, c=\"red\", marker=\"*\", label=\"Nominal model\")\n", "ax2.annotate(\n", " Rf\"$\\left({norm:.3f}, {theta/np.pi:+.3f}\\pi, {phi/np.pi:+.3f}\\pi\\right)$\",\n", " xy=(norm, theta),\n", " color=\"red\",\n", " fontsize=12,\n", ")\n", "ax2.legend(\n", " bbox_to_anchor=(0.99, 0.18),\n", " loc=\"upper right\",\n", " framealpha=1,\n", " prop={\"size\": 12},\n", ")\n", "\n", "fig.savefig(\"_images/correlation-statistics.svg\")\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "scroll-input" ] }, "outputs": [], "source": [ "%config InlineBackend.figure_formats = ['svg']\n", "fig, (ax1, ax2) = plt.subplots(figsize=(11, 5), ncols=2, sharey=True)\n", "fig.suptitle(R\"$\\vec{\\overline{\\alpha}}$ systematics distribution (polar)\", y=0.95)\n", "fig.subplots_adjust(wspace=0.05)\n", "\n", "ax1.set_xlabel(R\"$\\phi\\left(\\overline{\\alpha}\\right)$\")\n", "ax1.set_ylabel(R\"$\\theta\\left(\\overline{\\alpha}\\right)$\")\n", 
"ax2.set_xlabel(R\"$\\left|\\overline{\\alpha}\\right|$\")\n", "ax1.xaxis.set_major_locator(MultipleLocator(base=0.05 * np.pi))\n", "ax1.xaxis.set_major_formatter(FuncFormatter(axis_formatter(decimals=2)))\n", "ax1.yaxis.set_major_locator(MultipleLocator(base=0.01 * np.pi))\n", "ax1.yaxis.set_major_formatter(FuncFormatter(axis_formatter(decimals=2)))\n", "ax1.scatter(\n", " x=syst_weighted_alpha_polar[2][1:],\n", " y=syst_weighted_alpha_polar[1][1:],\n", " marker=\".\",\n", ")\n", "ax1.scatter(phi, theta, c=\"red\", marker=\"*\")\n", "ax1.annotate(\n", " Rf\"$\\left({norm:.3f}, {theta/np.pi:+.3f}\\pi, {phi/np.pi:+.3f}\\pi\\right)$\",\n", " xy=(phi, theta),\n", " color=\"red\",\n", " fontsize=12,\n", ")\n", "\n", "ax2.scatter(\n", " x=syst_weighted_alpha_polar[0][1:],\n", " y=syst_weighted_alpha_polar[1][1:],\n", " marker=\".\",\n", " label=\"Alternative models\",\n", ")\n", "ax2.scatter(norm, theta, c=\"red\", marker=\"*\", label=\"Nominal model\")\n", "ax2.annotate(\n", " Rf\"$\\left({norm:.3f}, {theta/np.pi:+.3f}\\pi, {phi/np.pi:+.3f}\\pi\\right)$\",\n", " xy=(norm, theta),\n", " color=\"red\",\n", " fontsize=12,\n", ")\n", "ax2.legend(\n", " bbox_to_anchor=(1.05, 1.1),\n", " loc=\"upper right\",\n", " framealpha=1,\n", " prop={\"size\": 12},\n", ")\n", "fig.savefig(\"_images/correlation-systematics.svg\")\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "def plot_correlation_matrix_polar(matrix, ax):\n", " ax.set_box_aspect(1)\n", " ax.matshow(matrix, cmap=cm.coolwarm, vmin=-1, vmax=+1)\n", " for i, row in enumerate(matrix):\n", " for j, value in enumerate(row):\n", " ax.text(i, j, f\"{value:.3f}\", ha=\"center\", va=\"center\")\n", " ticks = [i for i in range(3)]\n", " tick_labels = [\n", " R\"$\\left|\\overline{\\alpha}\\right|$\",\n", " R\"$\\theta\\left(\\overline{\\alpha}\\right)$\",\n", " R\"$\\phi\\left(\\overline{\\alpha}\\right)$\",\n", " ]\n", " ax.set_xticks(ticks)\n", " ax.set_xticklabels(tick_labels)\n", " ax.set_yticks(ticks)\n", " ax.set_yticklabels(tick_labels)\n", "\n", "\n", "assert stat_weighted_alpha.shape == (3, n_bootstraps)\n", "assert syst_weighted_alpha.shape == (3, n_models)\n", "\n", "%config InlineBackend.figure_formats = ['svg']\n", "fig, (ax1, ax2) = plt.subplots(figsize=(9, 5), ncols=2)\n", "fig.suptitle(R\"Correlation matrices for $\\vec{\\overline{\\alpha}}$ (\\textbf{polar})\")\n", "ax1.set_title(R\"\\textbf{statistics}\")\n", "ax2.set_title(R\"\\textbf{systematics}\")\n", "plot_correlation_matrix_polar(np.corrcoef(stat_weighted_alpha_polar), ax1)\n", "plot_correlation_matrix_polar(np.corrcoef(syst_weighted_alpha_polar), ax2)\n", "fig.savefig(\"_images/correlation-matrices.svg\")\n", "plt.show()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "def plot_correlation_matrix(matrix, ax):\n", " ax.set_box_aspect(1)\n", " ax.matshow(matrix, cmap=cm.coolwarm, vmin=-1, vmax=+1)\n", " for i, row in enumerate(matrix):\n", " for j, value in enumerate(row):\n", " ax.text(i, j, f\"{value:.3f}\", ha=\"center\", va=\"center\")\n", " ticks = [i for i in range(3)]\n", " tick_labels = [Rf\"$\\overline{{\\alpha}}_{i}$\" for i in \"xyz\"]\n", " ax.set_xticks(ticks)\n", " ax.set_xticklabels(tick_labels)\n", " ax.set_yticks(ticks)\n", " ax.set_yticklabels(tick_labels)\n", "\n", "\n", "assert stat_weighted_alpha.shape == (3, 
n_bootstraps)\n", "assert syst_weighted_alpha.shape == (3, n_models)\n", "\n", "%config InlineBackend.figure_formats = ['svg']\n", "fig, (ax1, ax2) = plt.subplots(figsize=(9, 5), ncols=2)\n", "fig.suptitle(\n", " R\"Correlation matrices for $\\vec{\\overline{\\alpha}}$ (\\textbf{cartesian})\"\n", ")\n", "ax1.set_title(R\"\\textbf{statistical \\& systematic}\")\n", "ax2.set_title(R\"\\textbf{model}\")\n", "plot_correlation_matrix(np.corrcoef(stat_weighted_alpha), ax1)\n", "plot_correlation_matrix(np.corrcoef(syst_weighted_alpha), ax2)\n", "fig.savefig(\"_images/correlation-matrices.svg\")\n", "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ ":::{only} latex\n", "```{container} full-width\n", "![](_images/correlation-statistics.svg)\n", "![](_images/correlation-systematics.svg)\n", "![](_images/correlation-matrices.svg)\n", "```\n", ":::\n", "\n", ":::{tip}\n", "A potential explanation for the $xz$-correlation may be found in Section {ref}`alpha-xz-correlations-per-resonance`.\n", ":::\n", "\n", "(exported-distributions)=\n", "\n", "## Exported distributions" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Export averaged polarimeter vectors" }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "json_data = {\n", " \"file description\": \"Averaged polarimeter vector for each alternative model\",\n", " \"model descriptions\": list(models),\n", " \"reference subsystem\": reference_subsystem,\n", " \"systematics\": {\n", " \"cartesian\": {\n", " \"x\": syst_weighted_alpha[0].tolist(),\n", " \"y\": syst_weighted_alpha[1].tolist(),\n", " \"z\": syst_weighted_alpha[2].tolist(),\n", " \"norm\": syst_weighted_alpha.tolist(),\n", " },\n", " \"polar\": {\n", " \"norm\": syst_weighted_alpha_polar[0].tolist(),\n", " \"theta\": syst_weighted_alpha_polar[1].tolist(),\n", " \"phi\": syst_weighted_alpha_polar[2].tolist(),\n", " },\n", " },\n", " \"statistics\": {\n", " \"cartesian\": {\n", " \"x\": stat_weighted_alpha[0].tolist(),\n", " \"y\": stat_weighted_alpha[1].tolist(),\n", " \"z\": stat_weighted_alpha[2].tolist(),\n", " \"norm\": stat_weighted_alpha.tolist(),\n", " },\n", " \"polar\": {\n", " \"norm\": stat_weighted_alpha_polar[0].tolist(),\n", " \"theta\": stat_weighted_alpha_polar[1].tolist(),\n", " \"phi\": stat_weighted_alpha_polar[2].tolist(),\n", " },\n", " },\n", "}\n", "json_averaged = \"_static/export/averaged-polarimeter-vectors.json\"\n", "with open(json_averaged, \"w\") as f:\n", " json.dump(json_data, f, indent=2)\n", "del json_data" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "mystnb": { "code_prompt_show": "Define Dalitz grid" }, "tags": [ "hide-cell" ] }, "outputs": [], "source": [ "grid_resolution = 100\n", "grid_sample = generate_meshgrid_sample(model.decay, grid_resolution)\n", "grid_sample = transformer(grid_sample)\n", "X_export = grid_sample[\"sigma1\"]\n", "Y_export = grid_sample[\"sigma2\"]" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Export fields as JSON" }, "tags": [ "hide-cell", "scroll-input" ] }, "outputs": [], "source": [ "def format_subsystem(reference_subsystem) -> str:\n", " subsystem_names = {\n", " 1: R\"K^{**} \\to \\pi^+ K^-\",\n", " 2: R\"\\Lambda^{**} \\to p K^-\",\n", " 3: R\"\\Delta^{**} \\to p \\pi^+\",\n", " }\n", " name = subsystem_names[reference_subsystem]\n", " return f\"{reference_subsystem}: {name}\"\n", "\n", "\n", 
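"# For each bootstrap sample, reset the nominal functions to the nominal parameters,\n", "# apply the bootstrapped parameter values, and export the intensity and alpha fields\n", "# over the Dalitz grid to a separate JSON file.\n",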
"STAT_FILENAMES = []\n", "for i in tqdm(range(n_bootstraps), disable=NO_TQDM):\n", " new_parameters = {k: v[i] for k, v in bootstrap_parameters.items()}\n", " for key, func in nominal_functions.items():\n", " func.update_parameters(nominal_parameters)\n", " func.update_parameters(new_parameters)\n", " filename = f\"_static/export/polarimetry-field-bootstrap-{i}.json\"\n", " export_polarimetry_field(\n", " sigma1=X_export[0],\n", " sigma2=Y_export[:, 0],\n", " intensity=nominal_functions[\"intensity\"](grid_sample).real,\n", " alpha_x=nominal_functions[\"alpha_x\"](grid_sample).real,\n", " alpha_y=nominal_functions[\"alpha_y\"](grid_sample).real,\n", " alpha_z=nominal_functions[\"alpha_z\"](grid_sample).real,\n", " filename=filename,\n", " metadata={\n", " \"model description\": nominal_model_title,\n", " \"parameters\": {k: f\"{v}\" for k, v in new_parameters.items()},\n", " \"reference subsystem\": format_subsystem(reference_subsystem),\n", " },\n", " )\n", " STAT_FILENAMES.append(filename)\n", "\n", "items = list(enumerate(jax_functions.items()))\n", "SYST_FILENAMES = []\n", "for i, (title, funcs) in tqdm(items, disable=NO_TQDM):\n", " for func in funcs.values():\n", " func.update_parameters(original_parameters[title])\n", " filename = f\"_static/export/polarimetry-field-model-{i}.json\"\n", " export_polarimetry_field(\n", " sigma1=X_export[0],\n", " sigma2=Y_export[:, 0],\n", " intensity=funcs[\"intensity\"](grid_sample).real,\n", " alpha_x=funcs[\"alpha_x\"](grid_sample).real,\n", " alpha_y=funcs[\"alpha_y\"](grid_sample).real,\n", " alpha_z=funcs[\"alpha_z\"](grid_sample).real,\n", " filename=filename,\n", " metadata={\n", " \"model description\": title,\n", " \"parameters\": {k: f\"{v}\" for k, v in func.parameters.items()},\n", " \"reference subsystem\": format_subsystem(reference_subsystem),\n", " },\n", " )\n", " SYST_FILENAMES.append(filename)" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Merge into one TAR/JSON file" }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "models_key = \"models\"\n", "bootstraps_key = \"bootstraps\"\n", "s1_key = \"m^2_Kpi\"\n", "s2_key = \"m^2_pK\"\n", "combined_json = {\n", " models_key: [],\n", " bootstraps_key: [],\n", "}\n", "\n", "for filename in SYST_FILENAMES:\n", " with open(filename) as f:\n", " data = json.load(f)\n", " s1_values = data[s1_key]\n", " s2_values = data[s2_key]\n", " del data[s1_key]\n", " del data[s2_key]\n", " combined_json[models_key].append(data)\n", "\n", "for filename in STAT_FILENAMES:\n", " with open(filename) as f:\n", " data = json.load(f)\n", " del data[s1_key]\n", " del data[s2_key]\n", " combined_json[bootstraps_key].append(data)\n", "\n", "combined_json[s1_key] = s1_values\n", "combined_json[s2_key] = s2_values\n", "\n", "json_file = \"_static/export/polarimetry-field.json\"\n", "with open(json_file, \"w\") as f:\n", " json.dump(combined_json, f)\n", "\n", "tar_file = \"_static/export/polarimetry-field.tar.gz\"\n", "with tarfile.open(tar_file, \"w:gz\") as tar:\n", " tar.add(\"_static/export/README.md\", arcname=\"README.md\")\n", " for path in STAT_FILENAMES + SYST_FILENAMES:\n", " tar.add(path, arcname=os.path.basename(path))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "remove-input" ] }, "outputs": [], "source": [ "src = f\"\"\"\n", "::::{{only}} html\n", ":::{{dropdown}} Exported {grid_resolution}x{grid_resolution} JSON grids for each 
bootstrap (*statistics & systematics*)\n", "\"\"\"\n", "for i, filename in enumerate(STAT_FILENAMES, 1):\n", " src += f\"{i}. [`{os.path.basename(filename)}`]({filename})\\n\"\n", "src += f\"\"\"\\\n", ":::\n", "\n", ":::{{dropdown}} Exported {grid_resolution}x{grid_resolution} JSON grids for each *model*\n", "\"\"\"\n", "for title, filename in zip(models, SYST_FILENAMES):\n", " src += f\"- [[download]]({filename}) {title}\\n\"\n", "src += \"\"\"\\\n", ":::\n", "\"\"\"\n", "\n", "bytes_averaged = os.path.getsize(json_averaged)\n", "bytes_json = os.path.getsize(json_file)\n", "bytes_tar = os.path.getsize(tar_file)\n", "src += f\"\"\"\n", ":::{{card}} All data combined can be downloaded here\n", "```{{button-link}} {json_averaged}\n", ":color: primary\n", ":outline:\n", "{{octicon}}`file-code` **{os.path.basename(json_averaged)}** ({1e-3*bytes_averaged:,.1f} kB)\n", "```\n", "```{{button-link}} {json_file}\n", ":color: primary\n", ":outline:\n", "{{octicon}}`file-code` **{os.path.basename(json_file)}** ({1e-6*bytes_json:,.1f} MB)\n", "```\n", "```{{button-link}} {tar_file}\n", ":color: primary\n", ":outline:\n", "{{octicon}}`file-zip` **{os.path.basename(tar_file)}** ({1e-6*bytes_tar:,.1f} MB)\n", "```\n", ":::\n", "::::\n", "\"\"\"\n", "\n", "src += \"\"\"\n", ":::{only} latex\n", "The polarimetry fields are computed for each parameter bootstrap (statistics & systematics) and for each model on\n", "[lc2pkpi-polarimetry.docs.cern.ch/uncertainties.html](https://lc2pkpi-polarimetry.docs.cern.ch/uncertainties.html).\n", "All combined fields can be downloaded as a single JSON file under\n", "[lc2pkpi-polarimetry.docs.cern.ch/_static/export/polarimetry-field.json](https://lc2pkpi-polarimetry.docs.cern.ch/_static/export/polarimetry-field.json)\n", "and as a single compressed TAR file under\n", "[lc2pkpi-polarimetry.docs.cern.ch/_static/export/polarimetry-field.tar.gz](https://lc2pkpi-polarimetry.docs.cern.ch/_static/export/polarimetry-field.tar.gz).\n", ":::\n", "\"\"\"\n", "Markdown(src)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ ":::{tip}\n", "See {ref}`appendix/serialization:Import and interpolate` for how to use these grids in an analysis and see {doc}`/zz.polarization-fit` for how to use these fields to determine the polarization from a measured distribution.\n", ":::" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.15" }, "myst": { "all_links_external": true } }, "nbformat": 4, "nbformat_minor": 4 }