{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# Average polarimeter per resonance" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "```{autolink-concat}\n", "```" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Import Python libraries" }, "tags": [ "hide-cell", "scroll-input" ] }, "outputs": [], "source": [ "from __future__ import annotations\n", "\n", "import logging\n", "import os\n", "import sys\n", "from collections import defaultdict\n", "from functools import lru_cache\n", "from math import ceil, sqrt\n", "from textwrap import dedent, wrap\n", "\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import plotly.graph_objects as go\n", "import sympy as sp\n", "import yaml\n", "from ampform.io import aslatex\n", "from ampform.sympy import PoolSum\n", "from IPython.display import Latex, Math\n", "from plotly.subplots import make_subplots\n", "from tensorwaves.interface import ParametrizedFunction\n", "from tqdm.auto import tqdm\n", "\n", "from polarimetry import formulate_polarimetry\n", "from polarimetry.amplitude import AmplitudeModel, simplify_latex_rendering\n", "from polarimetry.data import create_data_transformer, generate_phasespace_sample\n", "from polarimetry.decay import Particle\n", "from polarimetry.function import compute_sub_function\n", "from polarimetry.io import perform_cached_doit, perform_cached_lambdify\n", "from polarimetry.lhcb import (\n", " ParameterBootstrap,\n", " ParameterType,\n", " flip_production_coupling_signs,\n", " load_model_builder,\n", " load_model_parameters,\n", ")\n", "from polarimetry.lhcb.particle import load_particles\n", "\n", "if sys.version_info < (3, 8):\n", " from typing_extensions import Literal\n", "else:\n", " from typing import Literal\n", "\n", "SubSystemID = Literal[1, 2, 3]\n", "\n", "\n", "simplify_latex_rendering()\n", "FUNCTION_CACHE: dict[sp.Expr, ParametrizedFunction] = {}\n", "MODEL_FILE = \"../data/model-definitions.yaml\"\n", "PARTICLES = load_particles(\"../data/particle-definitions.yaml\")\n", "\n", "GITLAB_CI = \"GIT_PAGER\" in os.environ\n", "BACKEND = \"numpy\" if GITLAB_CI else \"jax\"\n", "\n", "NO_TQDM = \"EXECUTE_NB\" in os.environ\n", "if NO_TQDM:\n", " logging.getLogger().setLevel(logging.ERROR)\n", " logging.getLogger(\"polarimetry.io\").setLevel(logging.ERROR)\n", "\n", "\n", "@lru_cache(maxsize=None)\n", "def doit(expr: PoolSum) -> sp.Expr:\n", " return perform_cached_doit(expr)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Computations" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Formulate models" }, "tags": [ "hide-input", "remove-output" ] }, "outputs": [], "source": [ "def formulate_all_models() -> dict[str, dict[SubSystemID, AmplitudeModel]]:\n", " with open(MODEL_FILE) as f:\n", " data = yaml.load(f, Loader=yaml.SafeLoader)\n", " allowed_model_titles = list(data)\n", " models = defaultdict(dict)\n", " for title in tqdm(\n", " allowed_model_titles,\n", " desc=\"Formulate models\",\n", " disable=NO_TQDM,\n", " ):\n", " builder = load_model_builder(MODEL_FILE, PARTICLES, title)\n", " imported_parameters = load_model_parameters(\n", " MODEL_FILE, builder.decay, title, PARTICLES\n", " )\n", " for reference_subsystem in (1, 2, 3):\n", " model = builder.formulate(reference_subsystem)\n", " model.parameter_defaults.update(imported_parameters)\n", " 
models[title][reference_subsystem] = model\n", " models[title][2] = flip_production_coupling_signs(\n", " models[title][2], [\"K\", \"L\"]\n", " )\n", " models[title][3] = flip_production_coupling_signs(\n", " models[title][3], [\"K\", \"D\"]\n", " )\n", " return {i: {k: v for k, v in dct.items()} for i, dct in models.items()}\n", "\n", "\n", "MODELS = formulate_all_models()\n", "NOMINAL_MODEL_TITLE = \"Default amplitude model\"\n", "NOMINAL_MODEL = MODELS[NOMINAL_MODEL_TITLE]\n", "DECAY = NOMINAL_MODEL[1].decay" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Unfold symbolic expressions" }, "tags": [ "hide-input", "remove-output" ] }, "outputs": [], "source": [ "def unfold_expressions() -> (\n", " tuple[\n", " dict[str, sp.Expr],\n", " dict[str, sp.Expr],\n", " dict[str, dict[int, sp.Expr]],\n", " ]\n", "):\n", " intensity_exprs = {}\n", " alpha_x_exprs = {}\n", " alpha_z_exprs = defaultdict(dict)\n", " for title, ref_models in tqdm(\n", " MODELS.items(),\n", " desc=\"Unfolding expressions\",\n", " disable=NO_TQDM,\n", " ):\n", " model = ref_models[1]\n", " intensity_exprs[title] = unfold(model.intensity, model)\n", " for ref, model in tqdm(ref_models.items(), disable=NO_TQDM, leave=False):\n", " builder = load_model_builder(MODEL_FILE, PARTICLES, model_id=title)\n", " alpha_x, _, alpha_z = formulate_polarimetry(builder, ref)\n", " if ref == 1:\n", " alpha_x_exprs[title] = unfold(alpha_x, model)\n", " alpha_z_exprs[title][ref] = unfold(alpha_z, model)\n", " return (\n", " intensity_exprs,\n", " alpha_x_exprs,\n", " {i: dict(dct) for i, dct in alpha_z_exprs.items()},\n", " )\n", "\n", "\n", "def unfold(expr: sp.Expr, model: AmplitudeModel) -> sp.Expr:\n", " return doit(doit(expr).xreplace(model.amplitudes))\n", "\n", "\n", "INTENSITY_EXPRS, ALPHA_X_EXPRS, ALPHA_Z_EXPRS = unfold_expressions()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Convert to numerical functions" }, "tags": [ "hide-input", "remove-output", "scroll-input" ] }, "outputs": [], "source": [ "def lambdify_expressions() -> (\n", " tuple[\n", " dict[str, ParametrizedFunction],\n", " dict[str, ParametrizedFunction],\n", " dict[str, dict[int, ParametrizedFunction]],\n", " dict[str, dict[int, float]],\n", " ]\n", "):\n", " intensity_funcs = {}\n", " alpha_x_funcs = {}\n", " alpha_z_funcs = defaultdict(dict)\n", " original_parameters = defaultdict(dict)\n", " for title, ref_models in tqdm(\n", " MODELS.items(),\n", " desc=\"Lambdifying\",\n", " disable=NO_TQDM,\n", " ):\n", " reference_subsystem = 1\n", " model = ref_models[reference_subsystem]\n", " intensity_funcs[title] = cached_lambdify(INTENSITY_EXPRS[title], model)\n", " alpha_x_funcs[title] = cached_lambdify(ALPHA_X_EXPRS[title], model)\n", " for ref, model in ref_models.items():\n", " alpha_z_expr = ALPHA_Z_EXPRS[title][ref]\n", " alpha_z_funcs[title][ref] = cached_lambdify(alpha_z_expr, model)\n", " str_parameters = {str(k): v for k, v in model.parameter_defaults.items()}\n", " original_parameters[title][ref] = str_parameters\n", " return (\n", " intensity_funcs,\n", " alpha_x_funcs,\n", " {i: dict(dct) for i, dct in alpha_z_funcs.items()},\n", " {i: dict(dct) for i, dct in original_parameters.items()},\n", " )\n", "\n", "\n", "def cached_lambdify(expr: sp.Expr, model: AmplitudeModel) -> ParametrizedFunction:\n", " func = FUNCTION_CACHE.get(expr)\n", " if func is None:\n", " func = 
perform_cached_lambdify(\n", " expr,\n", " parameters=model.parameter_defaults,\n", " backend=BACKEND,\n", " )\n", " FUNCTION_CACHE[expr] = func\n", " str_parameters = {str(k): v for k, v in model.parameter_defaults.items()}\n", " func.update_parameters(str_parameters)\n", " return func\n", "\n", "\n", "(\n", " INTENSITY_FUNCS,\n", " ALPHA_X_FUNCS,\n", " ALPHA_Z_FUNCS,\n", " ORIGINAL_PARAMETERS,\n", ") = lambdify_expressions()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Generate phase space sample" }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "N_EVENTS = 100_000\n", "PHSP = generate_phasespace_sample(DECAY, N_EVENTS, seed=0)\n", "for ref in tqdm((1, 2, 3), disable=NO_TQDM, leave=False):\n", " transformer = create_data_transformer(NOMINAL_MODEL[ref], BACKEND)\n", " PHSP.update(transformer(PHSP))\n", " del transformer" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Compute statistics with parameter bootstrap" }, "tags": [ "hide-input", "scroll-input" ] }, "outputs": [], "source": [ "def create_bootstraps() -> dict[SubSystemID, dict[str, ParameterType]]:\n", " bootstraps = {\n", " ref: ParameterBootstrap(MODEL_FILE, DECAY, NOMINAL_MODEL_TITLE)\n", " for ref, model in NOMINAL_MODEL.items()\n", " }\n", " bootstraps[2] = flip_production_coupling_signs(bootstraps[2], [\"K\", \"L\"])\n", " bootstraps[3] = flip_production_coupling_signs(bootstraps[3], [\"K\", \"D\"])\n", " return {\n", " i: bootstrap.create_distribution(N_BOOTSTRAPS, seed=0)\n", " for i, bootstrap in bootstraps.items()\n", " }\n", "\n", "\n", "def compute_statistics_weighted_alpha() -> (\n", " tuple[dict[str, np.ndarray], dict[str, np.ndarray]]\n", "):\n", " weighted_αx = defaultdict(list)\n", " weighted_αz = defaultdict(list)\n", " resonances = get_resonance_to_reference()\n", " intensity_func = INTENSITY_FUNCS[NOMINAL_MODEL_TITLE]\n", " αx_ref1_func = ALPHA_X_FUNCS[NOMINAL_MODEL_TITLE]\n", " αz_ref1_func = ALPHA_Z_FUNCS[NOMINAL_MODEL_TITLE][1]\n", " original_parameters_ref1 = ORIGINAL_PARAMETERS[NOMINAL_MODEL_TITLE][1]\n", " for resonance, ref in tqdm(\n", " resonances.items(),\n", " desc=\"Computing statistics\",\n", " disable=NO_TQDM,\n", " ):\n", " _filter = [resonance.name.replace(\"(\", R\"\\(\").replace(\")\", R\"\\)\")]\n", " αz_func = ALPHA_Z_FUNCS[NOMINAL_MODEL_TITLE][ref]\n", " original_parameters = ORIGINAL_PARAMETERS[NOMINAL_MODEL_TITLE][ref]\n", " for i in tqdm(range(N_BOOTSTRAPS), disable=NO_TQDM, leave=False):\n", " new_parameters = {k: v[i] for k, v in BOOTSTRAP_PARAMETERS[ref].items()}\n", " new_parameters_ref1 = {\n", " k: v[i] for k, v in BOOTSTRAP_PARAMETERS[1].items()\n", " }\n", " intensity_func.update_parameters(new_parameters_ref1)\n", " αx_ref1_func.update_parameters(new_parameters_ref1)\n", " αz_ref1_func.update_parameters(new_parameters_ref1)\n", " αz_func.update_parameters(new_parameters)\n", " intensities = compute_sub_function(intensity_func, PHSP, _filter)\n", " αx_ref1_array = compute_sub_function(αx_ref1_func, PHSP, _filter).real\n", " αz_ref1_array = compute_sub_function(αz_ref1_func, PHSP, _filter).real\n", " αz_array = compute_sub_function(αz_func, PHSP, _filter).real\n", " αx_ref1 = compute_weighted_average(αx_ref1_array, intensities)\n", " αz_ref1 = compute_weighted_average(αz_ref1_array, intensities)\n", " αz = compute_weighted_average(αz_array, intensities)\n", " 
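# weighted_αx collects (αx, αz) pairs evaluated with reference subsystem 1 for\n", " # the XZ-correlation plots; weighted_αz uses the resonance's own subsystem\n", " 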
weighted_αx[resonance.name].append((αx_ref1, αz_ref1))\n", " weighted_αz[resonance.name].append(αz)\n", " intensity_func.update_parameters(original_parameters_ref1)\n", " αx_ref1_func.update_parameters(original_parameters_ref1)\n", " αz_ref1_func.update_parameters(original_parameters_ref1)\n", " αz_func.update_parameters(original_parameters)\n", " return (\n", " {k: np.array(v) for k, v in weighted_αx.items()},\n", " {k: np.array(v) for k, v in weighted_αz.items()},\n", " )\n", "\n", "\n", "def get_resonance_to_reference() -> dict[Particle, int]:\n", " subsystem_ids: dict[str, SubSystemID] = dict(K=1, L=2, D=3)\n", " resonances = [\n", " c.resonance\n", " for c in sorted(\n", " DECAY.chains,\n", " key=lambda p: (subsystem_ids[p.resonance.name[0]], p.resonance.mass),\n", " )\n", " ]\n", " return {p: subsystem_ids[p.name[0]] for p in resonances}\n", "\n", "\n", "def compute_weighted_average(v: np.ndarray, weights: np.ndarray) -> np.ndarray:\n", " return np.nansum(v * weights, axis=-1) / np.nansum(weights, axis=-1)\n", "\n", "\n", "N_BOOTSTRAPS = 100\n", "BOOTSTRAP_PARAMETERS = create_bootstraps()\n", "STAT_WEIGHTED_ALPHA_REF1, STAT_WEIGHTED_ALPHA_Z = compute_statistics_weighted_alpha()" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "mystnb": { "code_prompt_show": "Compute systematics from alternative models" }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "def compute_systematic_weighted_alpha() -> (\n", " tuple[dict[str, np.ndarray], dict[str, np.ndarray]]\n", "):\n", " weighted_αx = defaultdict(list)\n", " weighted_αz = defaultdict(list)\n", " resonances = get_resonance_to_reference()\n", " for title in tqdm(\n", " MODELS,\n", " disable=NO_TQDM,\n", " desc=\"Computing systematics\",\n", " ):\n", " for resonance, ref in tqdm(resonances.items(), disable=NO_TQDM, leave=False):\n", " _filter = [resonance.name.replace(\"(\", R\"\\(\").replace(\")\", R\"\\)\")]\n", " intensity_func = INTENSITY_FUNCS[title]\n", " αx_func_ref1 = ALPHA_X_FUNCS[title]\n", " αz_func_ref1 = ALPHA_Z_FUNCS[title][1]\n", " αz_func = ALPHA_Z_FUNCS[title][ref]\n", " intensity_func.update_parameters(ORIGINAL_PARAMETERS[title][1])\n", " αx_func_ref1.update_parameters(ORIGINAL_PARAMETERS[title][1])\n", " αz_func_ref1.update_parameters(ORIGINAL_PARAMETERS[title][1])\n", " αz_func.update_parameters(ORIGINAL_PARAMETERS[title][ref])\n", " αx_ref1_array = compute_sub_function(αx_func_ref1, PHSP, _filter)\n", " αz_ref1_array = compute_sub_function(αz_func_ref1, PHSP, _filter)\n", " αz_array = compute_sub_function(αz_func, PHSP, _filter)\n", " intensities = compute_sub_function(intensity_func, PHSP, _filter)\n", " αx_ref1 = compute_weighted_average(αx_ref1_array, intensities).real\n", " αz_ref1 = compute_weighted_average(αz_ref1_array, intensities).real\n", " αz = compute_weighted_average(αz_array, intensities).real\n", " weighted_αx[resonance.name].append((αx_ref1, αz_ref1))\n", " weighted_αz[resonance.name].append(αz)\n", " return (\n", " {k: np.array(v) for k, v in weighted_αx.items()},\n", " {k: np.array(v) for k, v in weighted_αz.items()},\n", " )\n", "\n", "\n", "SYST_WEIGHTED_ALPHA_REF1, SYST_WEIGHTED_ALPHA_Z = compute_systematic_weighted_alpha()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Result and comparison" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "LHCb values are taken [from the original study](https://arxiv.org/pdf/2208.03262.pdf#page=23) {cite}`LHCb-PAPER-2022-002`:" ] }, { "cell_type": "code", 
"execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "full-width" ] }, "outputs": [], "source": [ "def tabulate_alpha_z():\n", " with open(\"../data/observable-references.yaml\") as f:\n", " data = yaml.load(f, Loader=yaml.SafeLoader)\n", " lhcb_values = {\n", " k: tuple(float(v) for v in row.split(\" ± \"))\n", " for k, row in data[NOMINAL_MODEL_TITLE][\"alpha_z\"].items()\n", " if k != \"K(892)\"\n", " }\n", " model_ids = list(range(len(MODELS)))\n", " alignment = \"r\".join(\"\" for _ in model_ids)\n", " header = \" & \".join(Rf\"\\textbf{{{i}}}\" for i in model_ids[1:])\n", " src = Rf\"\\begin{{array}}{{l|c|c|{alignment}}}\" \"\\n\"\n", " src += Rf\" & \\textbf{{this study}} & \\textbf{{LHCb}} & {header} \\\\\" \"\\n\"\n", " src += R\"\\hline\" \"\\n\"\n", " src = dedent(src)\n", " for resonance in get_resonance_to_reference():\n", " src += f\" {resonance.latex} & \"\n", " stat_array = 1e3 * STAT_WEIGHTED_ALPHA_Z[resonance.name]\n", " syst_array = 1e3 * SYST_WEIGHTED_ALPHA_Z[resonance.name]\n", " syst_diff = syst_array[1:] - syst_array[0]\n", " value = syst_array[0]\n", " std = stat_array.std()\n", " min_ = syst_diff.min() # LS-model excluded\n", " max_ = syst_diff.max() # LS-model excluded\n", " src += Rf\"{value:>+.0f} \\pm {std:.0f}_{{{min_:+.0f}}}^{{{max_:+.0f}}}\"\n", " src += \" & \"\n", " lhcb = lhcb_values.get(resonance.name)\n", " if lhcb is not None:\n", " val, stat, syst, _ = lhcb\n", " src += Rf\"{val:+.0f} \\pm {stat:.0f} \\pm {syst:.0f}\"\n", " for model_id, diff in enumerate(syst_diff, 1):\n", " diff_str = f\"{diff:>+.0f}\"\n", " if diff == syst_diff.max():\n", " src += Rf\" & \\color{{red}}{{{diff_str}}}\"\n", " elif diff == syst_diff.min():\n", " src += Rf\" & \\color{{blue}}{{{diff_str}}}\"\n", " else:\n", " src += f\" & {diff_str}\"\n", " src += R\" \\\\\" \"\\n\"\n", " src += R\"\\end{array}\"\n", " return Latex(src)\n", "\n", "\n", "tabulate_alpha_z()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## Distribution analysis" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input", "full-width" ] }, "outputs": [], "source": [ "def plot_distributions():\n", " layout_kwargs = dict(\n", " font=dict(size=18),\n", " height=800,\n", " width=1000,\n", " xaxis_title=\"Resonance\",\n", " yaxis_title=\"ɑ̅z\",\n", " showlegend=False,\n", " )\n", " wrapped_titles = [\"
\".join(wrap(t, width=60)) for t in MODELS]\n", " colors = dict( # https://stackoverflow.com/a/44727682\n", " K=\"#d62728\", # red\n", " L=\"#1f77b4\", # blue\n", " D=\"#2ca02c\", # green\n", " )\n", " align_left = {\n", " \"K(700)\",\n", " \"K(1430)\",\n", " \"L(1520)\",\n", " \"L(2000)\",\n", " \"L(1670)\",\n", " \"D(1700)\",\n", " }\n", "\n", " fig = go.Figure()\n", " for i, (resonance, alpha_z) in enumerate(STAT_WEIGHTED_ALPHA_Z.items()):\n", " subsystem_id = resonance[0]\n", " fig.add_trace(\n", " go.Violin(\n", " y=alpha_z,\n", " hovertemplate=\"ɑ̅z = %{y:.3f}\",\n", " marker_color=colors[subsystem_id],\n", " meanline_visible=True,\n", " name=to_unicode(resonance),\n", " points=\"all\",\n", " text=wrapped_titles,\n", " )\n", " )\n", " fig.add_annotation(\n", " x=i,\n", " y=np.median(alpha_z),\n", " xshift=-65 if resonance in align_left else +50,\n", " font_color=colors[subsystem_id],\n", " font_size=14,\n", " showarrow=False,\n", " text=format_average(resonance),\n", " )\n", " fig.update_layout(\n", " title=\"Statistical distribution of weighted ɑ̅z\",\n", " **layout_kwargs,\n", " )\n", " fig.update_xaxes(tickangle=45)\n", " fig.show()\n", " fig.update_layout(font=dict(size=24))\n", " fig.write_image(\"_images/alpha-z-per-resonance-statistical.svg\")\n", "\n", " fig = go.Figure()\n", " for i, (resonance, alpha_z) in enumerate(SYST_WEIGHTED_ALPHA_Z.items()):\n", " subsystem_id = resonance[0]\n", " fig.add_trace(\n", " go.Box(\n", " y=alpha_z,\n", " boxpoints=\"all\",\n", " hovertemplate=(\n", " \"%{text}
%{x}: ɑ̅z = %{y:.3f}\"\n", " ),\n", " marker_color=colors[subsystem_id],\n", " name=to_unicode(resonance),\n", " text=wrapped_titles,\n", " line=dict(color=\"rgba(0,0,0,0)\"),\n", " fillcolor=\"rgba(0,0,0,0)\",\n", " )\n", " )\n", " fig.add_annotation(\n", " x=i,\n", " y=np.median(alpha_z),\n", " xshift=-65 if resonance in align_left else +15,\n", " font_color=colors[subsystem_id],\n", " font_size=14,\n", " showarrow=False,\n", " text=format_average(resonance),\n", " )\n", " fig.update_layout(\n", " title=\"Systematics distribution of weighted ɑ̅z\",\n", " **layout_kwargs,\n", " )\n", " fig.update_xaxes(tickangle=45)\n", " fig.show()\n", " fig.update_layout(font=dict(size=24))\n", " fig.write_image(\"_images/alpha-z-per-resonance-systematics.svg\")\n", "\n", "\n", "def to_unicode(resonance: str) -> str:\n", " return resonance.replace(\"L\", \"Λ\").replace(\"D\", \"Δ\")\n", "\n", "\n", "def format_average(resonance: str) -> str:\n", " stat_alpha = 1e3 * STAT_WEIGHTED_ALPHA_Z[resonance]\n", " syst_alpha = 1e3 * SYST_WEIGHTED_ALPHA_Z[resonance]\n", " diff = syst_alpha[1:] - syst_alpha[0]\n", " mean = syst_alpha[0]\n", " std = stat_alpha.std()\n", " return f\"{diff.max():+.0f}
<br>{mean:+.0f}±{std:.0f}<br>
{diff.min():+.0f}\"\n", "\n", "\n", "%config InlineBackend.figure_formats = ['svg']\n", "plot_distributions()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ ":::{only} latex\n", "![](_images/alpha-z-per-resonance-statistical.svg)\n", "![](_images/alpha-z-per-resonance-systematics.svg)\n", ":::" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "(alpha-xz-correlations-per-resonance)=\n", "### XZ-correlations" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "It follows from the definition of $\vec\alpha$ for a single resonance that:\n", "\n", "$$\n", "\begin{array}{rcl}\n", "\alpha_x &=& \left|\vec\alpha\right| \int I_0 \sin\left(\zeta^0\right) \,\mathrm{d}\tau \big/ \int I_0 \,\mathrm{d}\tau \\ \n", "\alpha_z &=& \left|\vec\alpha\right| \int I_0 \cos\left(\zeta^0\right) \,\mathrm{d}\tau \big/ \int I_0 \,\mathrm{d}\tau\n", "\end{array}\n", "$$\n", "\n", "This means that the correlation is 100% if $I_0$ does not change in the bootstrap. This may explain the $xz$-correlation observed for $\overline{\alpha}$ over the complete decay as reported in {ref}`uncertainties:Average polarimetry values`." ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "def round_nested(expression: sp.Expr, n_decimals: int) -> sp.Expr:\n", " no_sqrt_expr = expression\n", " for node in sp.preorder_traversal(expression):\n", " if node.free_symbols:\n", " continue\n", " if isinstance(node, sp.Pow) and node.args[1] == 1 / 2:\n", " no_sqrt_expr = no_sqrt_expr.xreplace({node: node.n()})\n", " rounded_expr = no_sqrt_expr\n", " for node in sp.preorder_traversal(no_sqrt_expr):\n", " if isinstance(node, (float, sp.Float)):\n", " rounded_expr = rounded_expr.xreplace({node: round(node, n_decimals)})\n", " return rounded_expr\n", "\n", "\n", "resonance_name = \"L(2000)\"\n", "resonance_couplings = {\n", " k: 0 if \"production\" in str(k) and resonance_name not in str(k) else v\n", " for k, v in NOMINAL_MODEL[1].parameter_defaults.items()\n", "}\n", "intensity_symbol = sp.Symbol(f\"I_{{{resonance_name}}}\")\n", "intensity_expr = round_nested(\n", " INTENSITY_EXPRS[NOMINAL_MODEL_TITLE].xreplace(resonance_couplings).simplify(),\n", " n_decimals=3,\n", ")\n", "Math(aslatex({intensity_symbol: intensity_expr}))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "alpha_symbol = sp.Symbol(Rf\"\\alpha_{{x,{resonance_name}}}\")\n", "alpha_expr = (\n", " round_nested(\n", " ALPHA_X_EXPRS[NOMINAL_MODEL_TITLE].xreplace(resonance_couplings).simplify(),\n", " n_decimals=3,\n", " )\n", " .rewrite(sp.conjugate)\n", " .simplify()\n", ")\n", "Math(aslatex({alpha_symbol: alpha_expr}))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "hide-input" ] }, "outputs": [], "source": [ "alpha_symbol = sp.Symbol(Rf\"\\alpha_{{z,{resonance_name}}}\")\n", "alpha_expr = (\n", " round_nested(\n", " ALPHA_Z_EXPRS[NOMINAL_MODEL_TITLE][1]\n", " .xreplace(resonance_couplings)\n", " .simplify(),\n", " n_decimals=3,\n", " )\n", " .rewrite(sp.conjugate)\n", " .simplify()\n", ")\n", "Math(aslatex({alpha_symbol: alpha_expr}))" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "full-width", "hide-input" ] }, "outputs": [], "source": [ "def 
plot_correlation_xz_mpl(stat_or_syst, typ: str) -> None:\n", " resonances = get_resonance_to_reference()\n", " n_resonances = len(resonances)\n", " n_cols = ceil(sqrt(n_resonances))\n", " n_rows = ceil(n_resonances / n_cols)\n", " fig, axes = plt.subplots(\n", " figsize=(12, 8),\n", " ncols=n_cols,\n", " nrows=n_rows,\n", " )\n", " fig.suptitle(typ + R\" $\\overline{\\alpha}_{xz}$-distribution\")\n", " colors = dict( # https://stackoverflow.com/a/44727682\n", " K=\"#d62728\", # red\n", " L=\"#1f77b4\", # blue\n", " D=\"#2ca02c\", # green\n", " )\n", " for ax, resonance in zip(axes.flatten(), resonances):\n", " subsystem = resonance.name[0]\n", " color = colors[subsystem]\n", " αx, αz = stat_or_syst[resonance.name].T\n", " if αx.std() == 0:\n", " correlation = 1\n", " slope = 0\n", " else:\n", " correlation = np.corrcoef(αx, αz)[0, 1]\n", " slope, _ = np.polyfit(αz, αx, deg=1)\n", " ax.scatter(αz, αx, c=color, s=1)\n", " ax.set_aspect(\"equal\", adjustable=\"datalim\")\n", " ax.set_title(f\"${resonance.latex}$\", y=0.85)\n", " kwargs = dict(c=color, size=8, transform=ax.transAxes)\n", " ax.text(0.03, 0.11, f\"slope: {slope:+.3g}\", **kwargs)\n", " ax.text(0.03, 0.03, f\"correlation: {correlation:+.3g}\", **kwargs)\n", " if ax in axes[-1, :]:\n", " ax.set_xlabel(R\"$\\overline{\\alpha}_z$\")\n", " if ax in axes[:, 0]:\n", " ax.set_ylabel(R\"$\\overline{\\alpha}_x$\")\n", " fig.tight_layout()\n", " fig.savefig(f\"_images/alpha-xz-{typ.lower()}.svg\")\n", " plt.show()\n", " plt.close()\n", "\n", "\n", "%config InlineBackend.figure_formats = ['svg']\n", "plot_correlation_xz_mpl(STAT_WEIGHTED_ALPHA_REF1, \"Statistics\")\n", "plot_correlation_xz_mpl(SYST_WEIGHTED_ALPHA_REF1, \"Systematics\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ ":::{only} latex\n", "![](_images/alpha-xz-statistics.svg)\n", "![](_images/alpha-xz-systematics.svg)\n", ":::" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "jupyter": { "source_hidden": true }, "tags": [ "full-width", "hide-input" ] }, "outputs": [], "source": [ "def plot_correlation_xz(stat_or_syst, typ: str) -> None:\n", " resonances = get_resonance_to_reference()\n", " n_resonances = len(resonances)\n", " n_cols = ceil(sqrt(n_resonances))\n", " n_rows = ceil(n_resonances / n_cols)\n", " fig = make_subplots(\n", " cols=n_cols,\n", " rows=n_rows,\n", " subplot_titles=[to_unicode(r.name) for r in resonances],\n", " horizontal_spacing=0.02,\n", " vertical_spacing=0.08,\n", " )\n", " fig.update_layout(\n", " title_text=(\n", " \"Systematics distribution of weighted ɑ̅xz\"\n", " ),\n", " height=800,\n", " width=1000,\n", " showlegend=False,\n", " )\n", " colors = dict( # https://stackoverflow.com/a/44727682\n", " K=\"#d62728\", # red\n", " L=\"#1f77b4\", # blue\n", " D=\"#2ca02c\", # green\n", " )\n", " wrapped_titles = [\"
<br>
\".join(wrap(t, width=60)) for t in MODELS]\n", " hovertemplate = (\n", " \"ɑ̅x = %{y:.3f}, ɑ̅z = %{x:.3f}\"\n", " )\n", " if \"syst\" in typ.lower():\n", " hovertemplate = \"%{text}
\" + hovertemplate\n", " for i, resonance in enumerate(resonances):\n", " αx, αz = stat_or_syst[resonance.name].T\n", " subsystem_name = resonance.name[0]\n", " fig.add_trace(\n", " go.Scatter(\n", " x=αz,\n", " y=αx,\n", " hovertemplate=hovertemplate,\n", " marker_color=colors[subsystem_name],\n", " name=to_unicode(resonance.name),\n", " text=wrapped_titles,\n", " mode=\"markers\",\n", " ),\n", " col=i % n_cols + 1,\n", " row=i // n_cols + 1,\n", " )\n", " fig.show()\n", " fig.write_image(f\"_images/alpha-xz-{typ.lower()}-plotly.svg\")\n", "\n", "\n", "%config InlineBackend.figure_formats = ['svg']\n", "plot_correlation_xz(STAT_WEIGHTED_ALPHA_REF1, \"Statistics\")\n", "plot_correlation_xz(SYST_WEIGHTED_ALPHA_REF1, \"Systematics\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "::::{only} latex\n", ":::{tip}\n", "The following plots are interactive and can best be viewed on [lc2pkpi-polarimetry.docs.cern.ch](https://lc2pkpi-polarimetry.docs.cern.ch).\n", ":::\n", "\n", "![](_images/alpha-xz-statistics-plotly.svg)\n", "![](_images/alpha-xz-systematics-plotly.svg)\n", "::::" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.15" } }, "nbformat": 4, "nbformat_minor": 4 }