lilit.likelihood

import pickle
import os

import matplotlib.pyplot as plt
import numpy as np
from cobaya.likelihood import Likelihood


class LiLit(Likelihood):

    """Class defining the Likelihood for LiteBIRD (LiLit).

    Within LiLit, the most relevant use cases of LiteBIRD (T, E, B) are already tested and working. So, if you need to work with those, you should not need to look into the actual definition of the likelihood function, and you can promptly start running your MCMCs. Still, you should provide the likelihood with a file containing the appropriate LiteBIRD noise power spectra, given that LiLit implements a simple inverse-noise weighting only as a placeholder for something more realistic. As for lensing, you must pass the reconstruction noise yourself, since its computation is not implemented and there is no placeholder for it.

    Parameters:
        name (str):
            The name for the likelihood, used in the output. It is necessary to pass it to LiLit (default: None).
        fields (list):
            List of fields in the data file (default: None).
        lmax (int or list):
            Maximum multipole to consider (default: None).
        like (str, optional):
            Type of likelihood to use (default: "exact"). Currently supports "exact" and "gaussian".
        lmin (int or list):
            Minimum multipole to consider (default: 2).
        cl_file (str, optional):
            Path to Cl file (default: None).
        nl_file (str, optional):
            Path to noise file (default: None).
        mapping (dict, optional):
            Dictionary mapping the fields in the noise file to the fields in the likelihood; used only if nl_file has a .txt extension (default: None).
        experiment (str, optional):
            Name of the experiment (default: None).
        nside (int, optional):
            Nside of the map (default: None).
        r (float, optional):
            Tensor-to-scalar ratio (default: None).
        nt (float, optional):
            Tensor spectral tilt (default: None).
        pivot_t (float, optional):
            Pivot scale of the tensor primordial power spectrum (default: 0.01).
        fsky (float or list):
            Sky fraction (default: 1).
        sep (str, optional):
            Separator used in the data file (default: "").
        debug (bool, optional):
            If True, produces more verbose output (default: None).


    Attributes:
        fields (list):
            List of fields in the data file.
        n (int):
            Number of fields.
        keys (list):
            List of keywords for the dictionaries.
        gauss_keys (np.ndarray):
            Array of keywords for the Gaussian likelihood (4-point functions).
        sigma2 (np.ndarray):
            Array of inverted covariance matrices for the Gaussian likelihood case.
        lmax (int or list):
            Value or list of lmax values.
        lmaxs (dict):
            Dictionary of lmax values.
        fsky (float or list):
            Value or list of fsky values.
        fskies (dict):
            Dictionary of fsky values.
        lmin (int or list):
            Minimum multipole to consider.
        lmins (dict):
            Dictionary of lmin values.
        like (str):
            Type of likelihood to use.
        cl_file (str):
            Path to Cl file.
        fiduCLS (dict):
            Dictionary of fiducial Cls.
        noiseCLS (dict):
            Dictionary of noise Cls.
        fiduCOV (np.ndarray):
            Fiducial covariance matrix obtained from the corresponding dictionary.
        noiseCOV (np.ndarray):
            Noise covariance matrix obtained from the corresponding dictionary.
        data (np.ndarray):
            Data vector obtained by summing fiduCOV + noiseCOV.
        cobaCLs (dict):
            Dictionary of Cobaya Cls.
        cobaCOV (np.ndarray):
            Cobaya covariance matrix obtained from the corresponding dictionary.
        coba (np.ndarray):
            Cobaya vector obtained by summing cobaCOV + noiseCOV.
        nl_file (str):
            Path to noise file.
        mapping (dict):
            Dictionary mapping the fields in the noise file to the fields in the likelihood; used only if nl_file has a .txt extension.
        experiment (str):
            Name of the experiment.
        nside (int):
            Nside of the map.
        r (float):
            Tensor-to-scalar ratio.
        nt (float):
            Tensor spectral tilt.
        pivot_t (float):
            Pivot scale of the tensor primordial power spectrum.
        sep (str):
            Separator used in the data file.
        debug (bool):
            If True, produces more verbose output.
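
    Example:
        A minimal sketch of how the likelihood could be instantiated (the
        noise file path and the numerical choices below are illustrative
        placeholders, not values shipped with LiLit):

            from lilit.likelihood import LiLit

            bb_likelihood = LiLit(
                name="BB",
                fields="b",
                lmax=500,
                r=0.01,
                nl_file="/path/to/noise_bb.pkl",  # placeholder path
                fsky=0.6,
            )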
    """

    def __init__(
        self,
        name=None,
        fields=None,
        lmax=None,
        like="exact",
        lmin=2,
        cl_file=None,
        nl_file=None,
        mapping=None,
        experiment=None,
        nside=None,
        r=None,
        nt=None,
        pivot_t=0.01,
        fsky=1,
        sep="",
        debug=None,
    ):
        # Check that the user has provided the name of the likelihood
        assert (
            name is not None
        ), "You must provide the name of the likelihood (e.g. 'BB' or 'TTTEEE')"
        # Check that the user has provided the fields
        assert (
            fields is not None
        ), "You must provide the fields (e.g. 'b' or ['t', 'e'])"
        # Check that the user has provided the maximum multipole
        assert lmax is not None, "You must provide the lmax (e.g. 300)"

        self.fields = fields
        self.n = len(fields)
        self.lmin = lmin
        self.like = like
        self.sep = sep
        self.cl_file = cl_file
        self.nl_file = nl_file
        if self.nl_file is not None and self.nl_file.endswith(".txt"):
            self.mapping = mapping
        self.experiment = experiment
        if self.experiment is not None:
            # Check that the user has provided the nside if an experiment is used
            assert nside is not None, "You must provide an nside to compute the noise"
            self.nside = nside
        self.debug = debug
        self.keys = self.get_keys()
        if "bb" in self.keys:
            # Check that the user has provided the tensor-to-scalar ratio if a BB likelihood is used
            assert (
                r is not None
            ), "You must provide the tensor-to-scalar ratio r for the fiducial production (the default pivot scale is 0.01 Mpc^-1)"
            self.r = r
            self.nt = nt
            self.pivot_t = pivot_t

        self.set_lmin_lmax_fsky(lmin, lmax, fsky)

        Likelihood.__init__(self, name=name)

    def set_lmin_lmax_fsky(self, lmin, lmax, fsky):
        """Take the lmin, lmax and fsky parameters and set the corresponding attributes.

        Sets the minimum multipole, the maximum multipole and the sky fraction. This automatically handles the case of a single value or a list of values. Note that the lmin, lmax and fsky for the cross-correlations are set to the geometrical mean of the lmin, lmax and fsky of the two fields. This approximation has been tested and found to be accurate, at least assuming that the masks of the two considered fields largely overlap.

        Parameters:
            lmin (int or list):
                Value or list of values of lmin.
            lmax (int or list):
                Value or list of values of lmax.
            fsky (float or list):
                Value or list of values of fsky.
        """
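        # Illustrative numbers (assumed, not from the source): with
        # fields=['t', 'e'] and lmax=[1500, 1200], the cross-correlation gets
        # lmaxs['te'] = floor(sqrt(1500 * 1200)) = 1341.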

        self.lmins = {}
        self.lmaxs = {}
        self.fskies = {}

        # Set lmin
        if isinstance(lmin, list):
            assert (
                len(lmin) == self.n
            ), "If you provide multiple lmin, they must match the number of requested fields with the same order"
            for i in range(self.n):
                for j in range(i, self.n):
                    key = self.fields[i] + self.sep + self.fields[j]
                    self.lmins[key] = int(
                        np.ceil(np.sqrt(lmin[i] * lmin[j]))
                    )  # this approximation allows one to gain some extra multipoles in the cross-correlation for which the SNR is still good.
                    self.lmins[key[::-1]] = int(np.ceil(np.sqrt(lmin[i] * lmin[j])))
            self.lmin = min(lmin)
        else:
            self.lmin = lmin

        # Set lmax
        if isinstance(lmax, list):
            assert (
                len(lmax) == self.n
            ), "If you provide multiple lmax, they must match the number of requested fields with the same order"
            for i in range(self.n):
                for j in range(i, self.n):
                    key = self.fields[i] + self.sep + self.fields[j]
                    self.lmaxs[key] = int(
                        np.floor(np.sqrt(lmax[i] * lmax[j]))
                    )  # this approximation allows one to gain some extra multipoles in the cross-correlation for which the SNR is still good.
                    self.lmaxs[key[::-1]] = int(np.floor(np.sqrt(lmax[i] * lmax[j])))
            self.lmax = max(lmax)
        else:
            self.lmax = lmax

        # Set fsky
        if isinstance(fsky, list):
            assert (
                len(fsky) == self.n
            ), "If you provide multiple fsky, they must match the number of requested fields with the same order"
            for i in range(self.n):
                for j in range(i, self.n):
                    key = self.fields[i] + self.sep + self.fields[j]
                    self.fskies[key] = np.sqrt(
                        fsky[i] * fsky[j]
                    )  # this approximation for the cross-correlation is not correct in the case of two very different masks (verified with simulations)
                    self.fskies[key[::-1]] = np.sqrt(fsky[i] * fsky[j])
            self.fsky = None
        else:
            self.fsky = fsky
        return

    def cov_filling(self, cov_dict):
        """Fill the covariance matrix with the appropriate spectra.

        Computes the covariance matrix given a dictionary of spectra. Returns the covariance matrix of the considered fields, with shape (n_fields x n_fields x lmax+1). Note that if more than one lmax, or lmin, is specified, there will be null values in the matrices, making them singular. This is handled in another method.

        Parameters:
            cov_dict (dict):
                The input dictionary of spectra.
        """
        # Initialize output array
        res = np.zeros((self.n, self.n, self.lmax + 1))

        # Loop over field1
        for i, field1 in enumerate(self.fields):
            # Loop over field2
            for j, field2 in enumerate(self.fields[i:]):
                # Get the index of field2
                j += i

                # Get the key of the covariance matrix
                key = field1 + self.sep + field2

                # Get lmin and lmax for this field pair
                lmin = self.lmins.get(key, self.lmin)
                lmax = self.lmaxs.get(key, self.lmax)

                # Get the covariance for this field pair
                cov = cov_dict.get(key, np.zeros(lmax + 1))

                # Set the appropriate values in the covariance matrix
                res[i, j, lmin : lmax + 1] = cov[lmin : lmax + 1]
                # Fill the covariance matrix symmetrically
                res[j, i] = res[i, j]

        return res

    def get_keys(self):
        """Extract the keys that have to be used as a function of the requested fields. These will be the usual 2-point functions, e.g., tt, te, ee, etc."""
        # List of all the possible combinations of the requested fields
        res = [
            self.fields[i] + self.sep + self.fields[j]
            for i in range(self.n)
            for j in range(i, self.n)
        ]
        # Print the requested keys
        if self.debug:
            print(f"\nThe requested keys are {res}")
        return res

    def get_Gauss_keys(self):
        """Find the proper dictionary keys for the requested fields.

        Extracts the keys that have to be used as a function of the requested fields for the Gaussian likelihood. Indeed, the Gaussian likelihood is computed using 4-point functions, so the keys are different. E.g., there will be keys such as tttt, ttee, tete, etc.
        """
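        # Illustrative (assumed) example with sep="": for self.keys =
        # ['tt', 'te', 'ee'], the element res[0, 1] holds the characters of
        # 'tt' + 'te', i.e. ['t', 't', 't', 'e'].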
        # Calculate the number of elements in the covariance matrix
        n = int(self.n * (self.n + 1) / 2)
        # Initialize a 3-d array to store the keys
        res = np.zeros((n, n, 4), dtype=str)
        # Loop over all the elements in the covariance matrix
        for i in range(n):
            for j in range(i, n):
                # Generate a key for the i-th and j-th element
                elem = self.keys[i] + self.sep + self.keys[j]
                # Loop over all the characters in the key
                for k in range(4):
                    # Add the k-th character to the i-th, j-th, and k-th
                    # indices of the array
                    res[i, j, k] = np.asarray(list(elem)[k])
                    res[j, i, k] = res[i, j, k]
        # Print the keys if the debug flag is set
        if self.debug:
            print(f"\nThe requested keys are {res}")
        # Return the keys
        return res

    def find_spectrum(self, input_dict, key):
        """Find a spectrum in a given dictionary.

        Returns the corresponding power spectrum for a given key. If the key is not found, it will try the reverse key. Otherwise it will fill the array with zeros.

        Parameters:
            input_dict (dict):
                Dictionary where you want to search for keys.

            key (str):
                Key to search for.
        """
        # create a zero array
        res = np.zeros(self.lmax + 1)

        # get lmin and lmax
        lmin = self.lmins.get(key, self.lmin)
        lmax = self.lmaxs.get(key, self.lmax)

        # try to find the key in the dictionary
        if key in input_dict:
            cov = input_dict[key]
        # if the key is not found, try the reverse key
        else:
            cov = input_dict.get(key[::-1], np.zeros(lmax + 1))

        # fill the array with the requested spectrum
        res[lmin : lmax + 1] = cov[lmin : lmax + 1]

        return res

    def sigma(self, keys, fiduDICT, noiseDICT):
        """Define the covariance matrix for the Gaussian case.

        In the case of the Gaussian likelihood, this returns the covariance matrix needed for the computation of the chi2. Note that the inversion is done in a separate function.

        Parameters:
            keys (np.ndarray):
                Keys for the covariance elements.

            fiduDICT (dict):
                Dictionary with the fiducial spectra.

            noiseDICT (dict):
                Dictionary with the noise spectra.
        """
        # The covariance matrix has to be symmetric.
        # The number of fields in the likelihood is self.n, so there are
        # n = int(self.n * (self.n + 1) / 2) independent spectra, and the
        # covariance is stored in a (n x n x self.lmax+1) ndarray.
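        # Schematically, each block computes the standard 4-point expression
        #   Cov(C_l^AB, C_l^CD) ∝ (C+N)^AC (C+N)^BD + (C+N)^AD (C+N)^BC,
        # rescaled by the appropriate fsky factors; the (2l+1) factor is
        # accounted for later, in log_likelihood.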
        n = int(self.n * (self.n + 1) / 2)
        res = np.zeros((n, n, self.lmax + 1))
        for i in range(n):  # Loop over all combinations of pairs of spectra
            for j in range(i, n):
                C_AC = self.find_spectrum(
                    fiduDICT, keys[i, j, 0] + keys[i, j, 2]
                )  # Find the fiducial spectra for each pair
                C_BD = self.find_spectrum(fiduDICT, keys[i, j, 1] + keys[i, j, 3])
                C_AD = self.find_spectrum(fiduDICT, keys[i, j, 0] + keys[i, j, 3])
                C_BC = self.find_spectrum(fiduDICT, keys[i, j, 1] + keys[i, j, 2])
                N_AC = self.find_spectrum(
                    noiseDICT, keys[i, j, 0] + keys[i, j, 2]
                )  # Find the noise spectra for each pair
                N_BD = self.find_spectrum(noiseDICT, keys[i, j, 1] + keys[i, j, 3])
                N_AD = self.find_spectrum(noiseDICT, keys[i, j, 0] + keys[i, j, 3])
                N_BC = self.find_spectrum(noiseDICT, keys[i, j, 1] + keys[i, j, 2])
                if self.fsky is not None:  # If self.fsky is defined, use that value
                    res[i, j] = (
                        (C_AC + N_AC) * (C_BD + N_BD) + (C_AD + N_AD) * (C_BC + N_BC)
                    ) / self.fsky
                else:  # Otherwise, use the fsky values of the individual field pairs
                    AC = keys[i, j, 0] + keys[i, j, 2]
                    BD = keys[i, j, 1] + keys[i, j, 3]
                    AD = keys[i, j, 0] + keys[i, j, 3]
                    BC = keys[i, j, 1] + keys[i, j, 2]
                    AB = keys[i, j, 0] + keys[i, j, 1]
                    CD = keys[i, j, 2] + keys[i, j, 3]
                    res[i, j] = (
                        np.sqrt(self.fskies[AC] * self.fskies[BD])
                        * (C_AC + N_AC)
                        * (C_BD + N_BD)
                        + np.sqrt(self.fskies[AD] * self.fskies[BC])
                        * (C_AD + N_AD)
                        * (C_BC + N_BC)
                    ) / (self.fskies[AB] * self.fskies[CD])
                res[j, i] = res[i, j]
        return res

    def inv_sigma(self, sigma):
        """Invert the covariance matrix for the Gaussian case.

        Inverts the previously calculated sigma ndarray. Note that some elements may be null, thus the covariance may be singular. If so, this also reduces the dimension of the matrix by deleting the corresponding rows and columns.

        Parameters:
            sigma (np.ndarray):
                (n x n x self.lmax+1) ndarray with the previously computed sigma (not inverted).
        """
        # Initialize array to store the inverted covariance matrices
        res = np.zeros(self.lmax + 1, dtype=object)

        # Loop over multipoles
        for i in range(self.lmax + 1):
            # Check if the matrix is singular
            COV = sigma[:, :, i]
            if np.linalg.det(COV) == 0:
                # Get indices of null diagonal elements
                idx = np.where(np.diag(COV) == 0)[0]
                # Remove corresponding rows and columns
                COV = np.delete(COV, idx, axis=0)
                COV = np.delete(COV, idx, axis=1)
            # Invert the matrix
            res[i] = np.linalg.inv(COV)
        return res[2:]

    def get_reduced_data(self, mat):
        """Find the reduced data, eliminating the singularity of the matrix.

        Cuts the rows and columns corresponding to zero diagonal values. Indeed, in case of different lmax, or lmin, for the fields, you will have singular matrices.

        Parameters:
            mat (np.ndarray):
                A ndarray containing the covariance matrices, with some singular ones.
        """
        # Select the indices corresponding to the zero diagonal
        idx = np.where(np.diag(mat) == 0)[0]
        # Delete the rows and columns from the matrix
        return np.delete(np.delete(mat, idx, axis=0), idx, axis=1)

    def CAMBres2dict(self, camb_results):
        """Take the CAMB result product from get_cmb_power_spectra and convert it to a dictionary with the proper keys.

        Parameters:
            camb_results (CAMBdata):
                CAMB result product from the method get_cmb_power_spectra.
        """
        # Get the array of multipoles
        ls = np.arange(camb_results["total"].shape[0], dtype=np.int64)
        # Mapping between the CAMB keys and the ones we want
        mapping = {"tt": 0, "ee": 1, "bb": 2, "te": 3, "et": 3}
        # Initialize the output dictionary
        res = {"ell": ls}
        # Loop over the keys we want
        for key, i in mapping.items():
            # Save the results
            res[key] = camb_results["total"][:, i]
        # Check if we want the lensing potential
        if "pp" in self.keys:
            # Get the lensing potential
            cl_lens = camb_results.get("lens_potential")
            # Check if it exists
            if cl_lens is not None:
                # Save it
                res["pp"] = cl_lens[:, 0].copy()
                # Check if we want the cross terms
                if "pt" in self.keys and "pe" in self.keys:
                    # Loop over the cross terms
                    for i, cross in enumerate(["pt", "pe"]):
                        # Save the result
                        res[cross] = cl_lens[:, i + 1].copy()
                        # Save the symmetric term
                        res[cross[::-1]] = res[cross]
        return res

    def txt2dict(self, txt, mapping=None, apply_ellfactor=None):
        """Take a txt file and convert it to a dictionary. This requires a way to map the columns to the keys. Also, it is possible to apply an ell factor to the Cls.

        Parameters:
            txt (np.ndarray):
                Array containing the spectra as columns (as loaded from a txt file).
            mapping (dict):
                Dictionary containing the mapping. Keywords will become the new keywords and values represent the index of the corresponding column.
            apply_ellfactor (bool, optional):
                If True, multiply each column by ell * (ell + 1) / 2 / pi (default: None).
        """
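        # Illustrative (assumed) usage: txt2dict(np.loadtxt("nl.txt"),
        # mapping={"tt": 0, "ee": 1, "bb": 2}) stores column 0 under "tt",
        # column 1 under "ee" and column 2 under "bb".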
        assert (
            mapping is not None
        ), "You must provide a way to map the columns of your txt to the keys of a dictionary"
        # Define the ell values from the length of the txt file
        ls = np.arange(txt.shape[0], dtype=np.int64)
        res = {"ell": ls}
        # Loop over the mapping and extract the corresponding column from the txt file
        # and store it in the dictionary under the corresponding keyword
        for key, i in mapping.items():
            if apply_ellfactor:
                res[key] = txt[:, i] * ls * (ls + 1) / 2 / np.pi
            else:
                res[key] = txt[:, i]
        return res

    def prod_fidu(self):
        """Produce the fiducial spectra or read the input ones.

        If the user has not provided a Cl file, this function will produce the fiducial power spectra starting from the CAMB ini file for Planck 2018. The extra keywords defined there maximize the agreement between the fiducial Cls and the ones obtained from Cobaya. If B-modes are requested, the tensor-to-scalar ratio and the spectral tilt will be set to the requested values. Note that if you do not provide a tilt, it will follow the standard single-field consistency relation. If you instead provide a custom file, that file is stored.
        """
        # If a custom file is provided, use that
        if self.cl_file is not None:
            # If the file is a pickle file, load it
            if self.cl_file.endswith(".pkl"):
                with open(self.cl_file, "rb") as pickle_file:
                    res = pickle.load(pickle_file)
            # Otherwise, load it as a text file
            else:
                txt = np.loadtxt(self.cl_file)
                mapping = {"tt": 0, "ee": 1, "bb": 2, "te": 3, "et": 3}
                res = self.txt2dict(txt, mapping)
            return res

        try:
            import camb
        except ImportError:
            print("CAMB does not seem to be installed. Check the requirements.")
            raise

        # Read the ini file containing the parameters for CAMB
        path = os.path.dirname(os.path.abspath(__file__))
        planck_path = os.path.join(path, "planck_2018.ini")
        pars = camb.read_ini(planck_path)

        if "bb" in self.keys:  # If we want to include the tensor mode
            print(f"\nProducing fiducial spectra for r={self.r} and nt={self.nt}")
            pars.InitPower.set_params(
                As=2.100549e-9,
                ns=0.9660499,
                r=self.r,
                nt=self.nt,
                pivot_tensor=self.pivot_t,
                pivot_scalar=0.05,
                parameterization=2,
            )
            pars.WantTensors = True
            pars.Accuracy.AccurateBB = True
        pars.DoLensing = True
        # pars.Accuracy.AccuracyBoost = 2  # This helps squeeze out some extra agreement between the Cobaya and fiducial spectra

        if self.debug:
            print(pars)

        results = camb.get_results(pars)
        res = results.get_cmb_power_spectra(
            CMB_unit="muK",
            lmax=self.lmax,
            raw_cl=False,
        )
        return self.CAMBres2dict(res)

    def prod_noise(self):
        """Produce the noise power spectra or read the input ones.

        If the user has not provided a noise file, this function will produce the noise power spectra for a given experiment with inverse-noise weighting of the white noise in each channel (TT, EE, BB). Note that you may want to have a look at this procedure, since it is merely a placeholder. Indeed, you should provide a more realistic file from which to read the noise spectra, given that inverse-noise weighting severely underestimates the amount of noise. If you instead provide a proper custom file, this method stores that.
        """
        # If the input noise file is a pickle file, load it
        if self.nl_file is not None:
            if self.nl_file.endswith(".pkl"):
                with open(self.nl_file, "rb") as pickle_file:
                    res = pickle.load(pickle_file)
            # If not, load the file as a text file
            else:
                _txt = np.loadtxt(self.nl_file)
                # Convert the text file to a dictionary
                res = self.txt2dict(_txt, self.mapping, apply_ellfactor=True)
            return res

        print(
            "***WARNING***: the inverse noise weighting performed here severely underestimates "
            "the actual noise level of LiteBIRD. You should provide an input "
            "noise power spectrum with a more realistic noise."
        )

        try:
            import yaml
            from yaml.loader import SafeLoader
            import healpy as hp
        except ImportError:
            print("yaml or healpy does not seem to be installed. Check the requirements.")
            raise

        assert (
            self.experiment is not None
        ), "You must specify the experiment you want to consider"
        print(f"\nComputing noise for {self.experiment}")

        path = os.path.dirname(os.path.abspath(__file__))
        experiments_path = os.path.join(path, "experiments.yaml")
        with open(experiments_path) as f:
            data = yaml.load(f, Loader=SafeLoader)

        # Get the instrument data from the saved data
        instrument = data[self.experiment]

        # Get the FWHM values from the instrument data
        fwhms = np.array(instrument["fwhm"])

        # Get the frequency values from the instrument data
        freqs = np.array(instrument["frequency"])

        # Get the depth values from the instrument data
        depth_p = np.array(instrument["depth_p"])
        depth_i = np.array(instrument["depth_i"])

        # Convert the depths from arcmin to per-pixel values
        depth_p /= hp.nside2resol(self.nside, arcmin=True)
        depth_i /= hp.nside2resol(self.nside, arcmin=True)
        depth_p = depth_p * np.sqrt(
            hp.pixelfunc.nside2pixarea(self.nside, degrees=False),
        )
        depth_i = depth_i * np.sqrt(
            hp.pixelfunc.nside2pixarea(self.nside, degrees=False),
        )

        # Get the number of frequencies
        n_freq = len(freqs)

        # Define the ell values as a numpy array
        ell = np.arange(0, self.lmax + 1, 1)

        # Define the keys for the dictionary that will be returned
        keys = ["tt", "ee", "bb"]

        # Beam standard deviation in radians, from the FWHM in arcmin
        sigma = np.radians(fwhms / 60.0) / np.sqrt(8.0 * np.log(2.0))
        sigma2 = sigma**2

        # Calculate the inverse Gaussian beam window function
        g = np.exp(ell * (ell + 1) * sigma2[:, np.newaxis])

        # Calculate the polarization factor
        pol_factor = np.array(
            [np.zeros(sigma2.shape), 2 * sigma2, 2 * sigma2, sigma2],
        )

        # Calculate the polarization factor as a function of ell
        pol_factor = np.exp(pol_factor)

        # Calculate the inverse beam window function for each polarization
        G = []
        for i, arr in enumerate(pol_factor):
            G.append(g * arr[:, np.newaxis])
        g = np.array(G)

        # Initialize the dictionary that will be returned
        res = {key: np.zeros((n_freq, self.lmax + 1)) for key in keys}

        # Calculate the per-channel inverse noise spectra
        res["tt"] = 1 / (g[0, :, :] * depth_i[:, np.newaxis] ** 2)
        res["ee"] = 1 / (g[3, :, :] * depth_p[:, np.newaxis] ** 2)
        res["bb"] = 1 / (g[3, :, :] * depth_p[:, np.newaxis] ** 2)

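        # Inverse-variance combination over channels: the total noise is
        # N_l = 1 / sum_ch(1 / N_l^ch), with the l(l+1)/(2*pi) factor applied
        # on top to match the convention of the other spectra.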
        # Calculate the normalized power spectra
        res["tt"] = ell * (ell + 1) / (np.sum(res["tt"], axis=0)) / 2 / np.pi
        res["ee"] = ell * (ell + 1) / (np.sum(res["ee"], axis=0)) / 2 / np.pi
        res["bb"] = ell * (ell + 1) / (np.sum(res["bb"], axis=0)) / 2 / np.pi

        res["tt"][:2] = [0, 0]
        res["ee"][:2] = [0, 0]
        res["bb"][:2] = [0, 0]

        return res

    def initialize(self):
        """Initialize the fiducial spectra and the noise power spectra."""
        # Compute the fiducial and noise power spectra
        self.fiduCLS = self.prod_fidu()
        self.noiseCLS = self.prod_noise()

        # Compute the covariance matrices
        self.fiduCOV = self.cov_filling(self.fiduCLS)
        self.noiseCOV = self.cov_filling(self.noiseCLS)

        # Print some information for debugging
        if self.debug:
            print(f"Keys of fiducial CLs ---> {self.fiduCLS.keys()}")
            print(f"Keys of noise CLs ---> {self.noiseCLS.keys()}")

            print("\nPrinting the first few values to check that it starts from 0...")
            field = list(self.fiduCLS.keys())[0]
            print(f"Fiducial CLs for {field.upper()} ---> {self.fiduCLS[field][0:5]}")
            field = list(self.noiseCLS.keys())[0]
            print(f"Noise CLs for {field.upper()} ---> {self.noiseCLS[field][0:5]}")

        # Compute the total covariance matrix
        self.data = (
            self.fiduCOV[:, :, self.lmin : self.lmax + 1]
            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
        )

        # Compute the inverse of the covariance matrix for the Gaussian case
        if self.like == "gaussian":
            self.gauss_keys = self.get_Gauss_keys()
            sigma2 = self.sigma(self.gauss_keys, self.fiduCLS, self.noiseCLS)
            self.sigma2 = self.inv_sigma(sigma2)

    def get_requirements(self):
        """Define the requirements of the likelihood, specifying which quantities calculated by a theory code are needed. Note that you may want to change the overall keyword from 'Cl' to 'unlensed_Cl' if you want to work without considering lensing."""
        # The likelihood needs the lensed CMB angular power spectra. The keyword can be set to "unlensed_Cl" to get the unlensed ones
        requirements = {}
        requirements["Cl"] = {cl: self.lmax for cl in self.keys}
        # If debug is set to True, the likelihood will print the list of items required by the likelihood
        if self.debug:
            requirements["CAMBdata"] = None
            print(
                f"\nYou requested that Cobaya provides to the likelihood the following items: {requirements}",
            )
        return requirements

    def data_vector(self, cov):
        """Get the data vector from the covariance matrix.

        Extracts the data vector necessary for the Gaussian case. Note that null values are cut, since some entries may be null when the fields have different lmax values.

        Parameters:
            cov (np.ndarray):
                A ndarray containing the covariance matrices, with some null ones.
        """
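        # E.g. (illustrative), for two fields this flattens the upper triangle
        # (auto1, cross, auto2) at a given ell and drops the zero entries.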
        return cov[np.triu_indices(self.n)][cov[np.triu_indices(self.n)] != 0]

    def chi_exact(self, i=0):
        """Compute the proper chi-square term for the exact likelihood case.

        Parameters:
            i (int, optional):
                ell index if needed. Defaults to 0.
        """
        # If more than one field is involved, we are in the multi-field
        # case and work with the full covariance matrices at the i-th
        # multipole.
        if self.n != 1:
            # Extract the covariance matrix and data for the i-th multipole
            coba = self.coba[:, :, i]
            data = self.data[:, :, i]
            det = np.linalg.det(coba)
            # If the determinant is equal to 0, we need to reduce the
            # dimensionality of the data and covariance matrix.
            if det == 0:
                data = self.get_reduced_data(data)
                coba = self.get_reduced_data(coba)
            # Compute the matrix M from the covariance matrix and the data
            M = np.linalg.solve(coba, data)
            # Compute the chi-square term from the trace of M, the log
            # determinant of M, and the number of fields
            return np.trace(M) - np.linalg.slogdet(M)[1] - data.shape[0]
        # If only one field is involved, we work directly on the full
        # arrays, with no need to loop over multipoles.
        else:
            # Compute M as the ratio of the data and the Cobaya spectra
            M = self.data / self.coba
            # Compute the chi-square term from M, the log of M, and the
            # constant 1
            return M - np.log(np.abs(M)) - 1

    def chi_gaussian(self, i=0):
        """Compute the proper chi-square term for the Gaussian likelihood case.

        Parameters:
            i (int, optional):
                ell index if needed. Defaults to 0.
        """
        # If we have more than one field
        if self.n != 1:
            coba = self.data_vector(self.coba[:, :, i])
            data = self.data_vector(self.data[:, :, i])
            return (coba - data) @ self.sigma2[i] @ (coba - data)
        # If we have only one field
        else:
            coba = self.coba[0, 0, :]
            data = self.data[0, 0, :]
            res = (coba - data) * self.sigma2 * (coba - data)
            return res

    def compute_chi_part(self, i=0):
        """Choose which chi-square term to compute.

        Parameters:
            i (int, optional):
                ell index if needed. Defaults to 0.
        """
        # check if the likelihood is "exact"
        if self.like == "exact":
            # if so, compute the chi-square term for the exact likelihood
            return self.chi_exact(i)
        # if not, check if it is "gaussian"
        elif self.like == "gaussian":
            # if so, compute the chi-square term for the Gaussian likelihood
            return self.chi_gaussian(i)
        # if neither, print an error message
        else:
            print("You requested something different from 'exact' or 'gaussian'!")
            return

    def log_likelihood(self):
        """Computes the log likelihood."""
        # Get the array of multipoles
        ell = np.arange(self.lmin, self.lmax + 1, 1)
        # Compute the log likelihood for each multipole
        if self.n != 1:
            logp_ℓ = np.zeros(ell.shape)
            for i in range(0, self.lmax + 1 - self.lmin):
                logp_ℓ[i] = -0.5 * (2 * ell[i] + 1) * self.compute_chi_part(i)
        else:
            logp_ℓ = -0.5 * (2 * ell + 1) * self.compute_chi_part()
        # Sum the log likelihood over multipoles
        return np.sum(logp_ℓ)

    def logp(self, **params_values):
        """Get the log likelihood and pass it to Cobaya to carry on the MCMC process."""
        if self.debug:
            CAMBdata = self.provider.get_CAMBdata()
            pars = CAMBdata.Params
            print(pars)

        # Get the Cls from Cobaya
        self.cobaCLs = self.provider.get_Cl(ell_factor=True)

        if self.debug:
            print(f"Keys of Cobaya CLs ---> {self.cobaCLs.keys()}")

            field = list(self.cobaCLs.keys())[0]
            print("\nPrinting the first few values to check that it starts from 0...")
            print(f"Cobaya CLs for {field.upper()} ---> {self.cobaCLs[field][0:5]}")

        # Fill the covariance matrix with the Cls from Cobaya
        self.cobaCOV = self.cov_filling(self.cobaCLs)

        if self.debug:
            ell = np.arange(0, self.lmax + 1, 1)
            plt.loglog(ell, self.fiduCOV[0, 0, :], label="Fiducial CLs")
            plt.loglog(ell, self.cobaCOV[0, 0, :], label="Cobaya CLs", ls="--")
            plt.loglog(ell, self.noiseCOV[0, 0, :], label="Noise CLs")
            plt.xlim(2, None)
            plt.legend()
            plt.show()

        # Add the noise covariance to the covariance matrix filled with the Cls from Cobaya
        self.coba = (
            self.cobaCOV[:, :, self.lmin : self.lmax + 1]
            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
        )

        # Compute the likelihood
        logp = self.log_likelihood()

        if self.debug:
            print(logp)
            exit()

        return logp


__all__ = ["LiLit"]

__docformat__ = "google"
__pdoc__ = {}
__pdoc__[
    "Likelihood"
] = "Likelihood class from Cobaya, refer to Cobaya documentation for more information."
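
A minimal sketch of how LiLit could be plugged into a Cobaya run (the sampled
parameter, its prior, the noise file path and the numerical choices below are
illustrative assumptions, not values prescribed by LiLit):

    from cobaya.run import run

    from lilit.likelihood import LiLit

    bb_likelihood = LiLit(
        name="BB",
        fields="b",
        lmax=500,
        r=0.01,
        nl_file="/path/to/noise_bb.pkl",  # placeholder path
    )

    info = {
        "likelihood": {"BB": bb_likelihood},
        "params": {
            "r": {"prior": {"min": 0.0, "max": 0.1}, "latex": "r"},
        },
        "theory": {"camb": None},
        "sampler": {"mcmc": None},
    }

    updated_info, sampler = run(info)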
class LiLit(cobaya.likelihood.Likelihood):
 10class LiLit(Likelihood):
 11
 12    """Class defining the Likelihood for LiteBIRD (LiLit).
 13
 14    Within LiLit, the most relevant study cases of LiteBIRD (T, E, B) are already tested and working. So, if you need to work with those, you should not need to look into the actual definition of the likelihood function, since you can proptly start running your MCMCs. Despite this, you should provide to the likelihood some file where to find the proper LiteBIRD noise power spectra, given that LiLit is implementing a simple inverse noise weighting just as a place-holder for something more realistic. As regards lensing, LiLit will need you to pass the reconstruction noise, since its computation is not coded, thus there is no place-holder for lensing.
 15
 16    Parameters:
 17        name (str):
 18            The name for the likelihood, used in the output. It is necessary to pass it to LiLit. (default: None).
 19        fields (list):
 20            List of fields in the data file (default: None).
 21        lmax (int or list):
 22            Maximum multipole to consider (default: None).
 23        like (str, optional):
 24            Type of likelihood to use (default: "exact"). Currently supports "exact" and "gaussian".
 25        lmin (int or list):
 26            Minimum multipole to consider (default: 2).
 27        cl_file (str, optional):
 28            Path to Cl file (default: None).
 29        nl_file (str, optional):
 30            Path to noise file (default: None).
 31        mapping (dict, optional):
 32            Dictionary of mapping between the fields in the noise file and the fields in the likelihood, used only if nl_file has .txt extension (default: None).
 33        experiment (str, optional):
 34            Name of experiment (default: None).
 35        nside (int, optional):
 36            Nside of the map (default: None).
 37        r (float, optional):
 38            Tensor-to-scalar ratio (default: None).
 39        nt (float, optional):
 40            Tensor spectral tilt (default: None).
 41        pivot_t (float, optional):
 42            Pivot scale of the tensor primordial power spectrum (default: 0.01).
 43        fsky (float or list):
 44            Sky fraction (default: 1).
 45        sep (str, optional):
 46            Separator used in the data file (default: "").
 47        debug (bool, optional):
 48            If True, produces more verbose output (default: None).
 49
 50
 51    Attributes:
 52        fields (list):
 53            List of fields in the data file.
 54        n_fields (int):
 55            Number of fields.
 56        keys (list):
 57            List of keywords for the dictionaries.
 58        gauss_keys (list):
 59            List of keywords for the Gaussian likelihood (4-points).
 60        sigma2 (np.ndarray):
 61            Array of covariances for the Gaussian likelihood case.
 62        lmax (int or list):
 63            List of lmax values.
 64        lmaxes (dict):
 65            Dictionary of lmax values.
 66        fsky (int or list):
 67            List of fsky values.
 68        fskies (dict):
 69            Dictionary of fsky values.
 70        lmin (int or list):
 71            Minimum multipole to consider.
 72        lmins (dict):
 73            Dictionary of lmin values.
 74        like (str):
 75            Type of likelihood to use.
 76        cl_file (str):
 77            Path to Cl file.
 78        fiduCLS (dict):
 79            Dictionary of fiducial Cls.
 80        noiseCLS (dict):
 81            Dictionary of noise Cls.
 82        fiduCOV (np.ndarray):
 83            Fiducial covariance matrix obtained from the corresponding dictionary.
 84        noiseCOV (np.ndarray):
 85            Noise covariance matrix obtained from the corresponding dictionary.
 86        data (np.ndarray):
 87            Data vector obtained by summing fiduCOV + noiseCOV.
 88        cobaCLS (dict):
 89            Dictionary of Cobaya Cls.
 90        cobaCOV (np.ndarray):
 91            Cobaya covariance matrix obtained from the corresponding dictionary.
 92        coba (np.ndarray):
 93            Cobaya vector obtained by summing cobaCOV + noiseCOV.
 94        nl_file (str):
 95            Path to noise file.
 96        mapping (dict):
 97            Dictionary of mapping between the fields in the noise file and the fields in the likelihood, used only if nl_file has .txt extension.
 98        experiment (str):
 99            Name of experiment.
100        nside (int):
101            Nside of the map.
102        r (float):
103            Tensor-to-scalar ratio.
104        nt (float):
105            Tensor spectral tilt.
106        pivot_t (float):
107            Pivot scale of the tensor primordial power spectrum.
108        sep (str):
109            Separator used in the data file.
110        debug (bool):
111            If True, produces more output.
112    """
113
114    def __init__(
115        self,
116        name=None,
117        fields=None,
118        lmax=None,
119        like="exact",
120        lmin=2,
121        cl_file=None,
122        nl_file=None,
123        mapping=None,
124        experiment=None,
125        nside=None,
126        r=None,
127        nt=None,
128        pivot_t=0.01,
129        fsky=1,
130        sep="",
131        debug=None,
132    ):
133        # Check that the user has provided the name of the likelihood
134        assert (
135            name is not None
136        ), "You must provide the name of the likelihood (e.g. 'BB' or 'TTTEEE')"
137        # Check that the user has provided the fields
138        assert (
139            fields is not None
140        ), "You must provide the fields (e.g. 'b' or ['t', 'e'])"
141        # Check that the user has provided the maximum multipole
142        assert lmax is not None, "You must provide the lmax (e.g. 300)"
143
144        self.fields = fields
145        self.n = len(fields)
146        self.lmin = lmin
147        self.like = like
148        self.sep = sep
149        self.cl_file = cl_file
150        self.nl_file = nl_file
151        if self.nl_file.endswith(".txt"):
152            self.mapping = mapping
153        self.experiment = experiment
154        if self.experiment is not None:
155            # Check that the user has provided the nside if an experiment is used
156            assert nside is not None, "You must provide an nside to compute the noise"
157            self.nside = nside
158        self.debug = debug
159        self.keys = self.get_keys()
160        if "bb" in self.keys:
161            # Check that the user has provided the tensor-to-scalar ratio if a BB likelihood is used
162            assert (
163                r is not None
164            ), "You must provide the tensor-to-scalar ratio r for the fiducial production (defaul is at 0.01 Mpc^-1)"
165            self.r = r
166            self.nt = nt
167            self.pivot_t = pivot_t
168
169        self.set_lmin_lmax_fsky(lmin, lmax, fsky)
170
171        Likelihood.__init__(self, name=name)
172
173    def set_lmin_lmax_fsky(self, lmin, lmax, fsky):
174        """Take lmin, lmax and fsky parameters and set the corresponding attributes.
175
176        Sets the minimum multipole, the maximum multipole and the sky fraction. This handles automatically the case of a single value or a list of values. Note that the lmin, lmax and fsky for the cross-correlations are set to the geometrical mean of the lmin, lmax and fsky of the two fields. This approximation has been tested and found to be accurate, at least assuming that the two masks of the two considered multipoles are very overlapped.
177
178        Parameters:
179            lmin (int or list):
180                Value or list of values of lmin.
181            lmax (int or list):
182                Value or list of values of lmax.
183            fsky (float or list):
184                Value or list of values of fsky.
185        """
186
187        self.lmins = {}
188        self.lmaxs = {}
189        self.fskies = {}
190
191        # Set lmin
192        if isinstance(lmin, list):
193            assert (
194                len(lmin) == self.n
195            ), "If you provide multiple lmin, they must match the number of requested fields with the same order"
196            for i in range(self.n):
197                for j in range(i, self.n):
198                    key = self.fields[i] + self.sep + self.fields[j]
199                    self.lmins[key] = int(
200                        np.ceil(np.sqrt(lmin[i] * lmin[j]))
201                    )  # this approximaiton allows to gain some extra multipoles in the cross-correalation for which the SNR is still good.
202                    self.lmins[key[::-1]] = int(np.ceil(np.sqrt(lmin[i] * lmin[j])))
203            self.lmin = min(lmin)
204        else:
205            self.lmin = lmin
206
207        # Set lmax
208        if isinstance(lmax, list):
209            assert (
210                len(lmax) == self.n
211            ), "If you provide multiple lmax, they must match the number of requested fields with the same order"
212            for i in range(self.n):
213                for j in range(i, self.n):
214                    key = self.fields[i] + self.sep + self.fields[j]
215                    self.lmaxs[key] = int(
216                        np.floor(np.sqrt(lmax[i] * lmax[j]))
217                    )  # this approximaiton allows to gain some extra multipoles in the cross-correalation for which the SNR is still good.
218                    self.lmaxs[key[::-1]] = int(np.floor(np.sqrt(lmax[i] * lmax[j])))
219            self.lmax = max(lmax)
220        else:
221            self.lmax = lmax
222
223        # Set fsky
224        if isinstance(fsky, list):
225            assert (
226                len(fsky) == self.n
227            ), "If you provide multiple fsky, they must match the number of requested fields with the same order"
228            for i in range(self.n):
229                for j in range(i, self.n):
230                    key = self.fields[i] + self.sep + self.fields[j]
231                    self.fskies[key] = np.sqrt(
232                        fsky[i] * fsky[j]
233                    )  # this approximation for the cross-correlation is not correct in the case of two very different masks (verified with simulations)
234                    self.fskies[key[::-1]] = np.sqrt(fsky[i] * fsky[j])
235            self.fsky = None
236        else:
237            self.fsky = fsky
238        return
239
240    def cov_filling(self, cov_dict):
241        """Fill covariance matrix with appropriate spectra.
242
243        Computes the covariance matrix once given a dictionary. Returns the covariance matrix of the considered fields, in a shape equal to (num_fields x num_fields x lmax). Note that if more than one lmax, or lmin, is specified, there will be null values in the matrices, making them singular. This will be handled in another method.
244
245        Parameters:
246            cov_dict (dict):
247                The input dictionary of spectra.
248        """
249        # Initialize output array
250        res = np.zeros((self.n, self.n, self.lmax + 1))
251
252        # Loop over field1
253        for i, field1 in enumerate(self.fields):
254            # Loop over field2
255            for j, field2 in enumerate(self.fields[i:]):
256                # Get the index of field2
257                j += i
258
259                # Get the key of the covariance matrix
260                key = field1 + self.sep + field2
261
262                # Get lmin and lmax for this field pair
263                lmin = self.lmins.get(key, self.lmin)
264                lmax = self.lmaxs.get(key, self.lmax)
265
266                # Get the covariance for this field pair
267                cov = cov_dict.get(key, np.zeros(lmax + 1))
268
269                # Set the appropriate values in the covariance matrix
270                res[i, j, lmin : lmax + 1] = cov[lmin : lmax + 1]
271                # Fill the covariance matrix symmetrically
272                res[j, i] = res[i, j]
273
274        return res
275
276    def get_keys(self):
277        """Extracts the keys that has to be used as a function of the requested fields. These will be the usual 2-points, e.g., tt, te, ee, etc."""
278        # List of all the possible combinations of the requested fields
279        res = [
280            self.fields[i] + self.sep + self.fields[j]
281            for i in range(self.n)
282            for j in range(i, self.n)
283        ]
284        # Print the requested keys
285        if self.debug:
286            print(f"\nThe requested keys are {res}")
287        return res
288
289    def get_Gauss_keys(self):
290        """Find the proper dictionary keys for the requested fields.
291
292        Extracts the keys that has to be used as a function of the requested fields for the Gaussian likelihood. Indeed, the Gaussian likelihood is computed using 4-points, so the keys are different. E.g., there will be keys such as tttt, ttee, tete, etc.
293        """
294        # Calculate the number of elements in the covariance matrix
295        n = int(self.n * (self.n + 1) / 2)
296        # Initialize a 3-d array to store the keys
297        res = np.zeros((n, n, 4), dtype=str)
298        # Loop over all the elements in the covariance matrix
299        for i in range(n):
300            for j in range(i, n):
301                # Generate a key for the i-th and j-th element
302                elem = self.keys[i] + self.sep + self.keys[j]
303                # Loop over all the characters in the key
304                for k in range(4):
305                    # Add the k-th character to the i-th, j-th, and k-th
306                    # indices of the array
307                    res[i, j, k] = np.asarray(list(elem)[k])
308                    res[j, i, k] = res[i, j, k]
309        # Print the keys if the debug flag is set
310        if self.debug:
311            print(f"\nThe requested keys are {res}")
312        # Return the keys
313        return res
314
315    def find_spectrum(self, input_dict, key):
316        """Find a spectrum in a given dictionary.
317
318        Returns the corresponding power sepctrum for a given key. If the key is not found, it will try to find the reverse key. Otherwise it will fill the array with zeros.
319
320        Parameters:
321            input_dict (dict):
322                Dictionary where you want to search for keys.
323
324            key (str):
325                Key to search for.
326        """
327        # create a zero array
328        res = np.zeros(self.lmax + 1)
329
330        # get lmin and lmax
331        lmin = self.lmins.get(key, self.lmin)
332        lmax = self.lmaxs.get(key, self.lmax)
333
334        # try to find the key in the dictionary
335        if key in input_dict:
336            cov = input_dict[key]
337        # if the key is not found, try the reverse key
338        else:
339            cov = input_dict.get(key[::-1], np.zeros(lmax + 1))
340
341        # fill the array with the requested spectrum
342        res[lmin : lmax + 1] = cov[lmin : lmax + 1]
343
344        return res
345
346    def sigma(self, keys, fiduDICT, noiseDICT):
347        """Define the covariance matrix for the Gaussian case.
348
349        In case of Gaussian likelihood, this returns the covariance matrix needed for the computation of the chi2. Note that the inversion is done in a separate funciton.
350
351        Parameters:
352            keys (dict):
353                Keys for the covariance elements.
354
355            fiduDICT (dict):
356                Dictionary with the fiducial spectra.
357
358            noiseDICT (dict):
359                Dictionary with the noise spectra.
360        """
361        # The covariance matrix has to be symmetric.
362        # The number of parameters in the likelihood is self.n.
363        # The covariance matrix is a (self.n x self.n x self.lmax+1) ndarray.
364        # We will store the covariance matrix in a (n x n x self.lmax+1) ndarray,
365        # where n = int(self.n * (self.n + 1) / 2).
366        n = int(self.n * (self.n + 1) / 2)
367        res = np.zeros((n, n, self.lmax + 1))
368        for i in range(n):  # Loop over all combinations of pairs of spectra
369            for j in range(i, n):
370                C_AC = self.find_spectrum(
371                    fiduDICT, keys[i, j, 0] + keys[i, j, 2]
372                )  # Find the fiducial spectra for each pair
373                C_BD = self.find_spectrum(fiduDICT, keys[i, j, 1] + keys[i, j, 3])
374                C_AD = self.find_spectrum(fiduDICT, keys[i, j, 0] + keys[i, j, 3])
375                C_BC = self.find_spectrum(fiduDICT, keys[i, j, 1] + keys[i, j, 2])
376                N_AC = self.find_spectrum(
377                    noiseDICT, keys[i, j, 0] + keys[i, j, 2]
378                )  # Find the noise spectra for each pair
379                N_BD = self.find_spectrum(noiseDICT, keys[i, j, 1] + keys[i, j, 3])
380                N_AD = self.find_spectrum(noiseDICT, keys[i, j, 0] + keys[i, j, 3])
381                N_BC = self.find_spectrum(noiseDICT, keys[i, j, 1] + keys[i, j, 2])
382                if self.fsky is not None:  # If self.fsky is defined, use the fsky value
383                    res[i, j] = (
384                        (C_AC + N_AC) * (C_BD + N_BD) + (C_AD + N_AD) * (C_BC + N_BC)
385                    ) / self.fsky
386                else:  # Otherwise, use the fsky values from the input spectra
387                    AC = keys[i, j, 0] + keys[i, j, 2]
388                    BD = keys[i, j, 1] + keys[i, j, 3]
389                    AD = keys[i, j, 0] + keys[i, j, 3]
390                    BC = keys[i, j, 1] + keys[i, j, 2]
391                    AB = keys[i, j, 0] + keys[i, j, 1]
392                    CD = keys[i, j, 2] + keys[i, j, 3]
393                    res[i, j] = (
394                        np.sqrt(self.fskies[AC] * self.fskies[BD])
395                        * (C_AC + N_AC)
396                        * (C_BD + N_BD)
397                        + np.sqrt(self.fskies[AD] * self.fskies[BC])
398                        * (C_AD + N_AD)
399                        * (C_BC + N_BC)
400                    ) / (self.fskies[AB] * self.fskies[CD])
401                res[j, i] = res[i, j]
402        return res
403
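Schematically, the quantity assembled above is the standard mode-counting covariance of two measured spectra, with \mathcal{C} \equiv C + N denoting signal plus noise (the (2\ell+1) factor is applied later in log_likelihood):

\mathrm{Cov}\left(\hat{C}_\ell^{AB}, \hat{C}_\ell^{CD}\right) \propto \frac{\mathcal{C}_\ell^{AC}\,\mathcal{C}_\ell^{BD} + \mathcal{C}_\ell^{AD}\,\mathcal{C}_\ell^{BC}}{f_{\mathrm{sky}}}

When per-field sky fractions are given, the single f_{\mathrm{sky}} is replaced by the combinations from self.fskies used in the else branch, i.e. \sqrt{f^{AC} f^{BD}} and \sqrt{f^{AD} f^{BC}} in the numerator and f^{AB} f^{CD} in the denominator.
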
404    def inv_sigma(self, sigma):
405        """Invert the covariance matrix of the Gaussian case.
406
407        Inverts the previously calculated sigma ndarray. Note that some elements may be null, making the covariance singular. If so, this also reduces the dimension of the matrix by deleting the corresponding rows and columns.
408
409        Parameters:
410            sigma (np.ndarray):
411                (n_pairs x n_pairs x self.lmax+1) ndarray with the previously computed sigma (not inverted), where n_pairs = self.n * (self.n + 1) / 2.
412        """
413        # Initialize array to store the inverted covariance matrices
414        res = np.zeros(self.lmax + 1, dtype=object)
415
416        # Loop over multipoles
417        for i in range(self.lmax + 1):
418            # Check if matrix is singular
419            COV = sigma[:, :, i]
420            if np.linalg.det(COV) == 0:
421                # Get indices of null diagonal elements
422                idx = np.where(np.diag(COV) == 0)[0]
423                # Remove corresponding rows and columns
424                COV = np.delete(COV, idx, axis=0)
425                COV = np.delete(COV, idx, axis=1)
426            # Invert matrix
427            res[i] = np.linalg.inv(COV)
428        return res[2:]
429
430    def get_reduced_data(self, mat):
431        """Find the reduced data eliminating the singularity of the matrix.
432
433        Cuts the rows and columns corresponding to zero diagonal values. Indeed, when the fields have different lmax or lmin values, the covariance matrices will be singular.
434
435        Parameters:
436            mat (np.ndarray):
437                A covariance matrix at a fixed multipole, possibly singular.
438        """
439        # Select the indices corresponding to the zero diagonal
440        idx = np.where(np.diag(mat) == 0)[0]
441        # Delete the rows and columns from the matrix
442        return np.delete(np.delete(mat, idx, axis=0), idx, axis=1)
443
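A standalone illustration of the row/column pruning used by both inv_sigma and get_reduced_data; the matrix values below are made up for the example:

import numpy as np

# A covariance with a null second row/column, as happens when one field
# has a smaller lmax and its spectra are zero-padded
cov = np.array([[2.0, 0.0, 0.5],
                [0.0, 0.0, 0.0],
                [0.5, 0.0, 1.0]])

idx = np.where(np.diag(cov) == 0)[0]  # indices of null diagonal entries -> [1]
reduced = np.delete(np.delete(cov, idx, axis=0), idx, axis=1)
inv = np.linalg.inv(reduced)  # the remaining 2x2 block is invertible
print(reduced.shape)  # (2, 2)
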
444    def CAMBres2dict(self, camb_results):
445        """Takes the CAMB result product from get_cmb_power_spectra and converts it to a dictionary with the proper keys.
446
447        Parameters:
448            camb_results (CAMBdata):
449                CAMB result product from the method get_cmb_power_spectra.
450        """
451        # Build the array of multipole values
452        ls = np.arange(camb_results["total"].shape[0], dtype=np.int64)
453        # Mapping between the CAMB keys and the ones we want
454        mapping = {"tt": 0, "ee": 1, "bb": 2, "te": 3, "et": 3}
455        # Initialize the output dictionary
456        res = {"ell": ls}
457        # Loop over the keys we want
458        for key, i in mapping.items():
459            # Save the results
460            res[key] = camb_results["total"][:, i]
461        # Check if we want the lensing potential
462        if "pp" in self.keys:
463            # Get the lensing potential
464            cl_lens = camb_results.get("lens_potential")
465            # Check if it exists
466            if cl_lens is not None:
467                # Save it
468                res["pp"] = cl_lens[:, 0].copy()
469                # Check if we want the cross terms
470                if "pt" in self.keys and "pe" in self.keys:
471                    # Loop over the cross terms
472                    for i, cross in enumerate(["pt", "pe"]):
473                        # Save the result
474                        res[cross] = cl_lens[:, i + 1].copy()
475                        # Save the symmetric term
476                        res[cross[::-1]] = res[cross]
477        return res
478
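A minimal sketch of how this method is meant to be fed, assuming CAMB is installed; the cosmological parameter values are illustrative placeholders, and likelihood stands for an already-initialized LiLit instance:

import camb

pars = camb.set_params(H0=67.36, ombh2=0.02237, omch2=0.1200,
                       As=2.1e-9, ns=0.965, lmax=300)
results = camb.get_results(pars)
spectra = results.get_cmb_power_spectra(CMB_unit="muK", lmax=300, raw_cl=False)
cls = likelihood.CAMBres2dict(spectra)
# cls now maps 'ell', 'tt', 'ee', 'bb', 'te', 'et' to arrays
# (plus the lensing keys, if they were requested in the fields)
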
479    def txt2dict(self, txt, mapping=None, apply_ellfactor=None):
480        """Takes an array loaded from a txt file and converts it to a dictionary. This requires a way to map the columns to the keys. Optionally, an ell factor can be applied to the Cls.
481
482        Parameters:
483            txt (np.ndarray):
484                Array of spectra (as columns) loaded from a txt file.
485            mapping (dict):
486                Dictionary containing the mapping. Keywords will become the new keywords and values represent the index of the corresponding column.
487        """
488        # A mapping is mandatory; the ell values are defined from the length of the array
489        assert (
490            mapping is not None
491        ), "You must provide a way to map the columns of your txt to the keys of a dictionary"
492        ls = np.arange(txt.shape[0], dtype=np.int64)
493        res = {"ell": ls}
494        # Loop over the mapping and extract the corresponding column from the txt file
495        # and store it in the dictionary under the corresponding keyword
496        for key, i in mapping.items():
497            if apply_ellfactor:
498                res[key] = txt[:, i] * ls * (ls + 1) / 2 / np.pi
499            else:
500                res[key] = txt[:, i]
501        return res
502
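For example, for a text file whose columns are TT, EE, BB and TE (the file name and layout are assumed for illustration):

import numpy as np

txt = np.loadtxt("spectra.txt")  # columns: TT, EE, BB, TE
mapping = {"tt": 0, "ee": 1, "bb": 2, "te": 3}
# likelihood stands for an already-initialized LiLit instance
cls = likelihood.txt2dict(txt, mapping, apply_ellfactor=False)
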
503    def prod_fidu(self):
504        """Produce fiducial spectra or read the input ones.
505
506        If the user has not provided a Cl file, this function will produce the fiducial power spectra starting from the CAMB inifile for Planck 2018. The extra keywords defined will maximize the agreement between the fiducial Cls and the ones obtained from Cobaya. If B-modes are requested, the tensor-to-scalar ratio and the spectral tilt will be set to the requested values. Note that if you do not provide a tilt, this will follow the standard single-field consistency relation. If instead you provide a custom file, this method stores its content.
507        """
508        # If a custom file is provided, use that
509        if self.cl_file is not None:
510            # If the file is a pickle file, load it
511            if self.cl_file.endswith(".pkl"):
512                with open(self.cl_file, "rb") as pickle_file:
513                    res = pickle.load(pickle_file)
514            # Otherwise, load it as text file
515            else:
516                txt = np.loadtxt(self.cl_file)
517                mapping = {"tt": 0, "ee": 1, "bb": 2, "te": 3, "et": 3}
518                res = self.txt2dict(txt, mapping)
519            return res
520
521        try:
522            import camb
523        except ImportError:
524            raise ImportError("CAMB does not seem to be installed. Check the requirements.")
525
526        # Read the ini file containing the parameters for CAMB
527        path = os.path.dirname(os.path.abspath(__file__))
528        planck_path = os.path.join(path, "planck_2018.ini")
529        pars = camb.read_ini(planck_path)
530
531        if "bb" in self.keys:  # If we want to include the tensor mode
532            print(f"\nProducing fiducial spectra for r={self.r} and nt={self.nt}")
533            pars.InitPower.set_params(
534                As=2.100549e-9,
535                ns=0.9660499,
536                r=self.r,
537                nt=self.nt,
538                pivot_tensor=self.pivot_t,
539                pivot_scalar=0.05,
540                parameterization=2,
541            )
542            pars.WantTensors = True
543            pars.Accuracy.AccurateBB = True
544        pars.DoLensing = True
545        # pars.Accuracy.AccuracyBoost = 2  # This helps to get an extra squeeze on the agreement between the Cobaya and fiducial spectra
546
547        if self.debug:
548            print(pars)
549
550        results = camb.get_results(pars)
551        res = results.get_cmb_power_spectra(
552            CMB_unit="muK",
553            lmax=self.lmax,
554            raw_cl=False,
555        )
556        return self.CAMBres2dict(res)
557
558    def prod_noise(self):
559        """Produce noise power spectra or read the input ones.
560
561        If the user has not provided a noise file, this function will produce the noise power spectra for a given experiment with inverse noise weighting of the white noise in each channel (TT, EE, BB). Note that you may want to have a look at the procedure, since it is merely a place-holder: you should provide a more realistic file from which to read the noise spectra, given that inverse noise weighting severely underestimates the amount of noise. If instead you provide a custom file, this method stores its content.
562        """
563        # If a custom noise file is provided, use that
564        if self.nl_file is not None:
565            if self.nl_file.endswith(".pkl"):
566                with open(self.nl_file, "rb") as pickle_file:
567                    res = pickle.load(pickle_file)
568            # If not, load the file as a text file
569            else:
570                _txt = np.loadtxt(self.nl_file)
571                # Convert the text file to a dictionary
572                res = self.txt2dict(_txt, self.mapping, apply_ellfactor=True)
573            return res
574
575        print(
576            "***WARNING***: the inverse noise weighting performed here severely "
577            "underestimates the actual noise level of LiteBIRD. You should provide "
578            "an input noise power spectrum with more realistic noise."
579        )
580
581        try:
582            import yaml
583            from yaml.loader import SafeLoader
584            import healpy as hp
585        except ImportError:
586            raise ImportError("yaml or healpy do not seem to be installed. Check the requirements.")
587
588        assert (
589            self.experiment is not None
590        ), "You must specify the experiment you want to consider"
591        print(f"\nComputing noise for {self.experiment}")
592
593        path = os.path.dirname(os.path.abspath(__file__))
594        experiments_path = os.path.join(path, "experiments.yaml")
595        with open(experiments_path) as f:
596            data = yaml.load(f, Loader=SafeLoader)
597
598        # Get the instrument data from the saved data
599        instrument = data[self.experiment]
600
601        # Get the FWHM values from the instrument data
602        fwhms = np.array(instrument["fwhm"])
603
604        # Get the frequency values from the instrument data
605        freqs = np.array(instrument["frequency"])
606
607        # Get the depth values from the instrument data
608        depth_p = np.array(instrument["depth_p"])
609        depth_i = np.array(instrument["depth_i"])
610
611        # Rescale the depth by the pixel resolution (from muK-arcmin to muK-radians)
612        depth_p /= hp.nside2resol(self.nside, arcmin=True)
613        depth_i /= hp.nside2resol(self.nside, arcmin=True)
614        depth_p = depth_p * np.sqrt(
615            hp.pixelfunc.nside2pixarea(self.nside, degrees=False),
616        )
617        depth_i = depth_i * np.sqrt(
618            hp.pixelfunc.nside2pixarea(self.nside, degrees=False),
619        )
620
621        # Get the number of frequencies
622        n_freq = len(freqs)
623
624        # Define the ell values as a numpy array
625        ell = np.arange(0, self.lmax + 1, 1)
626
627        # Define the keys for the dictionary that will be returned
628        keys = ["tt", "ee", "bb"]
629
630        sigma = np.radians(fwhms / 60.0) / np.sqrt(8.0 * np.log(2.0))
631        sigma2 = sigma**2
632
633        # Calculate the inverse of the squared Gaussian beam window
634        g = np.exp(ell * (ell + 1) * sigma2[:, np.newaxis])
635
636        # Calculate the polarization factor
637        pol_factor = np.array(
638            [np.zeros(sigma2.shape), 2 * sigma2, 2 * sigma2, sigma2],
639        )
640
641        # Exponentiate the polarization correction factors
642        pol_factor = np.exp(pol_factor)
643
644        # Calculate the Gaussian beam function for each polarization
645        G = []
646        for arr in pol_factor:
647            G.append(g * arr[:, np.newaxis])
648        g = np.array(G)
649
650        # Initialize the dictionary that will be returned
651        res = {key: np.zeros((n_freq, self.lmax + 1)) for key in keys}
652
653        # Calculate the unnormalized power spectra
654        res["tt"] = 1 / (g[0, :, :] * depth_i[:, np.newaxis] ** 2)
655        res["ee"] = 1 / (g[3, :, :] * depth_p[:, np.newaxis] ** 2)
656        res["bb"] = 1 / (g[3, :, :] * depth_p[:, np.newaxis] ** 2)
657
658        # Calculate the normalized power spectra
659        res["tt"] = ell * (ell + 1) / (np.sum(res["tt"], axis=0)) / 2 / np.pi
660        res["ee"] = ell * (ell + 1) / (np.sum(res["ee"], axis=0)) / 2 / np.pi
661        res["bb"] = ell * (ell + 1) / (np.sum(res["bb"], axis=0)) / 2 / np.pi
662
663        res["tt"][:2] = [0, 0]
664        res["ee"][:2] = [0, 0]
665        res["bb"][:2] = [0, 0]
666
667        return res
668
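In formulas, the place-holder combines the channels with inverse-variance weights; schematically, for temperature,

N_\ell^{TT} = \frac{\ell(\ell+1)}{2\pi} \left[ \sum_i \frac{e^{-\ell(\ell+1)\sigma_i^2}}{\omega_i} \right]^{-1}, \qquad \sigma_i = \frac{\mathrm{FWHM}_i}{\sqrt{8 \ln 2}},

where \omega_i is the per-pixel noise variance of channel i (the squared depth computed above) and the result carries the usual \ell(\ell+1)/2\pi factor. Polarization picks up an additional e^{\sigma_i^2} factor in the beam term, as coded above.
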
669    def initialize(self):
670        """Initializes the fiducial spectra and the noise power spectra."""
671        # Compute the fiducial and noise power spectra
672        self.fiduCLS = self.prod_fidu()
673        self.noiseCLS = self.prod_noise()
674
675        # Compute the covariance matrices
676        self.fiduCOV = self.cov_filling(self.fiduCLS)
677        self.noiseCOV = self.cov_filling(self.noiseCLS)
678
679        # Print some information for debugging
680        if self.debug:
681            print(f"Keys of fiducial CLs ---> {self.fiduCLS.keys()}")
682            print(f"Keys of noise CLs ---> {self.noiseCLS.keys()}")
683
684            print("\nPrinting the first few values to check that the arrays start from ell = 0...")
685            field = list(self.fiduCLS.keys())[0]
686            print(f"Fiducial CLs for {field.upper()} ---> {self.fiduCLS[field][0:5]}")
687            field = list(self.noiseCLS.keys())[0]
688            print(f"Noise CLs for {field.upper()} ---> {self.noiseCLS[field][0:5]}")
689
690        # Compute the total covariance matrix
691        self.data = (
692            self.fiduCOV[:, :, self.lmin : self.lmax + 1]
693            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
694        )
695
696        # For the Gaussian likelihood, also compute and invert the covariance of the spectra
697        if self.like == "gaussian":
698            self.gauss_keys = self.get_Gauss_keys()
699            sigma2 = self.sigma(self.gauss_keys, self.fiduCLS, self.noiseCLS)
700            self.sigma2 = self.inv_sigma(sigma2)
701
702    def get_requirements(self):
703        """Defines the requirements of the likelihood, specifying which quantities calculated by a theory code are needed. Note that you may want to change the overall keyword from 'Cl' to 'unlensed_Cl' if you want to work without considering lensing."""
704        # The likelihood needs the lensed CMB angular power spectra. The keyword can be set to "unlensed_Cl" to get the unlensed ones
705        requirements = {}
706        requirements["Cl"] = {cl: self.lmax for cl in self.keys}
707        # If debug is set to True, the likelihood will also request the CAMBdata object and print the items it needs
708        if self.debug:
709            requirements["CAMBdata"] = None
710            print(
711                f"\nYou requested that Cobaya provides to the likelihood the following items: {requirements}",
712            )
713        return requirements
714
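For instance, for a T and E run with lmax = 300 (values are illustrative), the returned dictionary reads

{"Cl": {"tt": 300, "te": 300, "ee": 300}}

with the keys coming from get_keys and, in debug mode, an extra "CAMBdata": None entry.
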
715    def data_vector(self, cov):
716        """Get data vector from the covariance matrix.
717
718        Extracts the data vector necessary for the Gaussian case. Note that null values are cut, since some entries may vanish when the fields have different lmax or lmin values.
719
720        Parameters:
721            cov (np.ndarray):
722                An ndarray containing the covariance matrix at a fixed multipole, possibly with null entries.
723        """
724        return cov[np.triu_indices(self.n)][cov[np.triu_indices(self.n)] != 0]
725
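A standalone sketch of the extraction at a fixed multipole (matrix values made up):

import numpy as np

n = 2
cov = np.array([[4.0, 1.0],
                [1.0, 0.0]])  # symmetric slice at one ell; one null entry
upper = cov[np.triu_indices(n)]  # upper triangle -> [4., 1., 0.]
vector = upper[upper != 0]  # null entries cut -> [4., 1.]
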
726    def chi_exact(self, i=0):
727        """Computes proper chi-square term for the exact likelihood case.
728
729        Parameters:
730            i (int, optional):
731                ell index if needed. Defaults to 0.
732        """
733        # If the number of fields is not equal to 1, we are in the
734        # multi-field case, in which case we work with the full
735        # covariance matrix at each multipole.
736        if self.n != 1:
737            # We extract the theory and data matrices at the i-th
738            # multipole.
739            coba = self.coba[:, :, i]
740            data = self.data[:, :, i]
741            det = np.linalg.det(coba)
742            # If the determinant is equal to 0, then we need to reduce
743            # the dimensionality of the data and covariance matrix.
744            if det == 0:
745                data = self.get_reduced_data(data)
746                coba = self.get_reduced_data(coba)
747            # We compute the matrix M using the covariance matrix and
748            # the data.
749            M = np.linalg.solve(coba, data)
750            # We compute the chi-square term using the trace of M, the
751            # log determinant of M, and the number of fields.
752            return np.trace(M) - np.linalg.slogdet(M)[1] - data.shape[0]
753        # If there is a single field, the matrices reduce to scalars,
754        # so no matrix inversion is needed and the expression
755        # simplifies accordingly.
756        else:
757            # We compute the matrix M using the covariance matrix and
758            # the data.
759            M = self.data / self.coba
760            # We compute the chi-square term using M, the log of M, and
761            # a constant value.
762            return M - np.log(np.abs(M)) - 1
763
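The multi-field branch implements the usual exact (Wishart-based) expression: with M_\ell = \bar{C}_\ell^{-1} \hat{C}_\ell, where \bar{C}_\ell is the theory-plus-noise matrix (self.coba) and \hat{C}_\ell the data matrix (self.data),

\chi^2_\ell = \mathrm{Tr}\, M_\ell - \ln\left|M_\ell\right| - N_{\mathrm{fields}},

which for a single field reduces to \chi^2_\ell = m_\ell - \ln m_\ell - 1 with m_\ell = \hat{C}_\ell / \bar{C}_\ell, as in the else branch.
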
764    def chi_gaussian(self, i=0):
765        """Computes proper chi-square term for the Gaussian likelihood case.
766
767        Parameters:
768            i (int, optional):
769                ell index if needed. Defaults to 0.
770        """
771        # If we have more than one data vector
772        if self.n != 1:
773            coba = self.data_vector(self.coba[:, :, i])
774            data = self.data_vector(self.data[:, :, i])
775            return (coba - data) @ self.sigma2[i] @ (coba - data)
776        # If we have only one data vector
777        else:
778            coba = self.coba[0, 0, :]
779            data = self.data[0, 0, :]
780            res = (coba - data) * self.sigma2 * (coba - data)
781            return res
782
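This is the quadratic form of a Gaussian likelihood in the spectra,

\chi^2_\ell = \left(\hat{x}_\ell - x_\ell\right)^{\mathsf{T}} \Sigma_\ell^{-1} \left(\hat{x}_\ell - x_\ell\right),

where x_\ell collects the non-null upper-triangular spectra returned by data_vector and \Sigma_\ell^{-1} is the inverse covariance computed in inv_sigma.
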
783    def compute_chi_part(self, i=0):
784        """Chooses which chi-square term to compute.
785
786        Parameters:
787            i (int, optional):
788                ell index if needed. Defaults to 0.
789        """
790        # check if the likelihood is "exact"
791        if self.like == "exact":
792            # if so, compute the chi-square term for the exact likelihood
793            return self.chi_exact(i)
794        # if not, check if it is "gaussian"
795        elif self.like == "gaussian":
796            # if so, compute the chi-square term for the gaussian likelihood
797            return self.chi_gaussian(i)
798        # if neither, print an error message
799        else:
800            print("You requested something different from 'exact' or 'gaussian'!")
801            return
802
803    def log_likelihood(self):
804        """Computes the log likelihood."""
805        # Get the array of multipoles
806        ell = np.arange(self.lmin, self.lmax + 1, 1)
807        # Compute the log likelihood for each multipole
808        if self.n != 1:
809            logp_ℓ = np.zeros(ell.shape)
810            for i in range(0, self.lmax + 1 - self.lmin):
811                logp_ℓ[i] = -0.5 * (2 * ell[i] + 1) * self.compute_chi_part(i)
812        else:
813            logp_ℓ = -0.5 * (2 * ell + 1) * self.compute_chi_part()
814        # Sum the log likelihood over multipoles
815        return np.sum(logp_ℓ)
816
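In both cases the quantity returned to Cobaya is

\log \mathcal{L} = -\frac{1}{2} \sum_{\ell = \ell_{\mathrm{min}}}^{\ell_{\mathrm{max}}} (2\ell + 1)\, \chi^2_\ell .
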
817    def logp(self, **params_values):
818        """Gets the log likelihood and passes it to Cobaya to carry on the MCMC process."""
819        if self.debug:
820            CAMBdata = self.provider.get_CAMBdata()
821            pars = CAMBdata.Params
822            print(pars)
823
824        # Get the Cls from Cobaya
825        self.cobaCLs = self.provider.get_Cl(ell_factor=True)
826
827        if self.debug:
828            print(f"Keys of Cobaya CLs ---> {self.cobaCLs.keys()}")
829
830            field = list(self.cobaCLs.keys())[0]
831            print("\nPrinting the first few values to check that the arrays start from ell = 0...")
832            print(f"Cobaya CLs for {field.upper()} ---> {self.cobaCLs[field][0:5]}")
833
834        # Fill the covariance matrix with the Cls from Cobaya
835        self.cobaCOV = self.cov_filling(self.cobaCLs)
836
837        if self.debug:
838            ell = np.arange(0, self.lmax + 1, 1)
839            plt.loglog(ell, self.fiduCOV[0, 0, :], label="Fiducial CLs")
840            plt.loglog(ell, self.cobaCOV[0, 0, :], label="Cobaya CLs", ls="--")
841            plt.loglog(ell, self.noiseCOV[0, 0, :], label="Noise CLs")
842            plt.xlim(2, None)
843            plt.legend()
844            plt.show()
845
846        # Add the noise covariance to the covariance matrix filled with the Cls from Cobaya
847        self.coba = (
848            self.cobaCOV[:, :, self.lmin : self.lmax + 1]
849            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
850        )
851
852        # Compute the likelihood
853        logp = self.log_likelihood()
854
855        if self.debug:
856            print(logp)
857            exit()
858
859        return logp
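
To close, a minimal usage sketch, assuming the package is importable as lilit and that CAMB is available as the theory code; the likelihood settings, priors, and sampler options below are illustrative placeholders rather than tested settings:

from cobaya.run import run
from lilit import LiLit  # assumed import path

# A B-mode-only likelihood; passing 'experiment' and 'nside' triggers the
# place-holder noise computation described in prod_noise
bb = LiLit(name="BB", fields=["b"], like="exact", lmax=300,
           r=0.01, experiment="LiteBIRD", nside=128)

info = {
    "likelihood": {"BB": bb},
    "params": {"r": {"prior": {"min": 0.0, "max": 0.1}, "latex": "r"}},
    "theory": {"camb": {"extra_args": {"WantTensors": True}}},
    "sampler": {"mcmc": None},
}

updated_info, sampler = run(info)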

def get_Gauss_keys(self):
289    def get_Gauss_keys(self):
290        """Find the proper dictionary keys for the requested fields.
291
292        Extracts the keys that has to be used as a function of the requested fields for the Gaussian likelihood. Indeed, the Gaussian likelihood is computed using 4-points, so the keys are different. E.g., there will be keys such as tttt, ttee, tete, etc.
293        """
294        # Calculate the number of elements in the covariance matrix
295        n = int(self.n * (self.n + 1) / 2)
296        # Initialize a 3-d array to store the keys
297        res = np.zeros((n, n, 4), dtype=str)
298        # Loop over all the elements in the covariance matrix
299        for i in range(n):
300            for j in range(i, n):
301                # Generate a key for the i-th and j-th element
302                elem = self.keys[i] + self.sep + self.keys[j]
303                # Loop over all the characters in the key
304                for k in range(4):
305                    # Add the k-th character to the i-th, j-th, and k-th
306                    # indices of the array
307                    res[i, j, k] = np.asarray(list(elem)[k])
308                    res[j, i, k] = res[i, j, k]
309        # Print the keys if the debug flag is set
310        if self.debug:
311            print(f"\nThe requested keys are {res}")
312        # Return the keys
313        return res

Find the proper dictionary keys for the requested fields.

Extracts the keys that has to be used as a function of the requested fields for the Gaussian likelihood. Indeed, the Gaussian likelihood is computed using 4-points, so the keys are different. E.g., there will be keys such as tttt, ttee, tete, etc.

def find_spectrum(self, input_dict, key):
315    def find_spectrum(self, input_dict, key):
316        """Find a spectrum in a given dictionary.
317
318        Returns the corresponding power sepctrum for a given key. If the key is not found, it will try to find the reverse key. Otherwise it will fill the array with zeros.
319
320        Parameters:
321            input_dict (dict):
322                Dictionary where you want to search for keys.
323
324            key (str):
325                Key to search for.
326        """
327        # create a zero array
328        res = np.zeros(self.lmax + 1)
329
330        # get lmin and lmax
331        lmin = self.lmins.get(key, self.lmin)
332        lmax = self.lmaxs.get(key, self.lmax)
333
334        # try to find the key in the dictionary
335        if key in input_dict:
336            cov = input_dict[key]
337        # if the key is not found, try the reverse key
338        else:
339            cov = input_dict.get(key[::-1], np.zeros(lmax + 1))
340
341        # fill the array with the requested spectrum
342        res[lmin : lmax + 1] = cov[lmin : lmax + 1]
343
344        return res

Find a spectrum in a given dictionary.

Returns the corresponding power sepctrum for a given key. If the key is not found, it will try to find the reverse key. Otherwise it will fill the array with zeros.

Arguments:
  • input_dict (dict): Dictionary where you want to search for keys.
  • key (str): Key to search for.
def sigma(self, keys, fiduDICT, noiseDICT):
346    def sigma(self, keys, fiduDICT, noiseDICT):
347        """Define the covariance matrix for the Gaussian case.
348
349        In case of Gaussian likelihood, this returns the covariance matrix needed for the computation of the chi2. Note that the inversion is done in a separate funciton.
350
351        Parameters:
352            keys (dict):
353                Keys for the covariance elements.
354
355            fiduDICT (dict):
356                Dictionary with the fiducial spectra.
357
358            noiseDICT (dict):
359                Dictionary with the noise spectra.
360        """
361        # The covariance matrix has to be symmetric.
362        # The number of parameters in the likelihood is self.n.
363        # The covariance matrix is a (self.n x self.n x self.lmax+1) ndarray.
364        # We will store the covariance matrix in a (n x n x self.lmax+1) ndarray,
365        # where n = int(self.n * (self.n + 1) / 2).
366        n = int(self.n * (self.n + 1) / 2)
367        res = np.zeros((n, n, self.lmax + 1))
368        for i in range(n):  # Loop over all combinations of pairs of spectra
369            for j in range(i, n):
370                C_AC = self.find_spectrum(
371                    fiduDICT, keys[i, j, 0] + keys[i, j, 2]
372                )  # Find the fiducial spectra for each pair
373                C_BD = self.find_spectrum(fiduDICT, keys[i, j, 1] + keys[i, j, 3])
374                C_AD = self.find_spectrum(fiduDICT, keys[i, j, 0] + keys[i, j, 3])
375                C_BC = self.find_spectrum(fiduDICT, keys[i, j, 1] + keys[i, j, 2])
376                N_AC = self.find_spectrum(
377                    noiseDICT, keys[i, j, 0] + keys[i, j, 2]
378                )  # Find the noise spectra for each pair
379                N_BD = self.find_spectrum(noiseDICT, keys[i, j, 1] + keys[i, j, 3])
380                N_AD = self.find_spectrum(noiseDICT, keys[i, j, 0] + keys[i, j, 3])
381                N_BC = self.find_spectrum(noiseDICT, keys[i, j, 1] + keys[i, j, 2])
382                if self.fsky is not None:  # If self.fsky is defined, use the fsky value
383                    res[i, j] = (
384                        (C_AC + N_AC) * (C_BD + N_BD) + (C_AD + N_AD) * (C_BC + N_BC)
385                    ) / self.fsky
386                else:  # Otherwise, use the fsky values from the input spectra
387                    AC = keys[i, j, 0] + keys[i, j, 2]
388                    BD = keys[i, j, 1] + keys[i, j, 3]
389                    AD = keys[i, j, 0] + keys[i, j, 3]
390                    BC = keys[i, j, 1] + keys[i, j, 2]
391                    AB = keys[i, j, 0] + keys[i, j, 1]
392                    CD = keys[i, j, 2] + keys[i, j, 3]
393                    res[i, j] = (
394                        np.sqrt(self.fskies[AC] * self.fskies[BD])
395                        * (C_AC + N_AC)
396                        * (C_BD + N_BD)
397                        + np.sqrt(self.fskies[AD] * self.fskies[BC])
398                        * (C_AD + N_AD)
399                        * (C_BC + N_BC)
400                    ) / (self.fskies[AB] * self.fskies[CD])
401                res[j, i] = res[i, j]
402        return res

Define the covariance matrix for the Gaussian case.

In case of Gaussian likelihood, this returns the covariance matrix needed for the computation of the chi2. Note that the inversion is done in a separate funciton.

Arguments:
  • keys (dict): Keys for the covariance elements.
  • fiduDICT (dict): Dictionary with the fiducial spectra.
  • noiseDICT (dict): Dictionary with the noise spectra.
def inv_sigma(self, sigma):
404    def inv_sigma(self, sigma):
405        """Invert the covariance matrix of the Gaussian case.
406
407        Inverts the previously calculated sigma ndarray. Note that some elements may be null, thus the covariance may be singular. If so, this also reduces the dimension of the matrix by deleting the corresponding row and column.
408
409        Parameters:
410            ndarray (np.ndarray):
411                (self.n x self.n x self.lmax+1) ndarray with the previously computed sigma (not inverted).
412        """
413        # Initialize array to store the inverted covariance matrices
414        res = np.zeros(self.lmax + 1, dtype=object)
415
416        # Loop over multipoles
417        for i in range(self.lmax + 1):
418            # Check if matrix is singular
419            COV = sigma[:, :, i]
420            if np.linalg.det(COV) == 0:
421                # Get indices of null diagonal elements
422                idx = np.where(np.diag(COV) == 0)[0]
423                # Remove corresponding rows and columns
424                COV = np.delete(COV, idx, axis=0)
425                COV = np.delete(COV, idx, axis=1)
426            # Invert matrix
427            res[i] = np.linalg.inv(COV)
428        return res[2:]

Invert the covariance matrix of the Gaussian case.

Inverts the previously calculated sigma ndarray. Note that some elements may be null, thus the covariance may be singular. If so, this also reduces the dimension of the matrix by deleting the corresponding row and column.

Arguments:
  • ndarray (np.ndarray): (self.n x self.n x self.lmax+1) ndarray with the previously computed sigma (not inverted).
def get_reduced_data(self, mat):
430    def get_reduced_data(self, mat):
431        """Find the reduced data eliminating the singularity of the matrix.
432
433        Cuts the row and column corresponding to a zero diagonal value. Indeed, in case of different lmax, or lmin, for the fields, you will have singular marices.
434
435        Parameters:
436            ndarray (np.ndarray):
437                A ndarray containing the covariance matrices, with some singular ones.
438        """
439        # Select the indices corresponding to the zero diagonal
440        idx = np.where(np.diag(mat) == 0)[0]
441        # Delete the rows and columns from the matrix
442        return np.delete(np.delete(mat, idx, axis=0), idx, axis=1)

Find the reduced data eliminating the singularity of the matrix.

Cuts the row and column corresponding to a zero diagonal value. Indeed, in case of different lmax, or lmin, for the fields, you will have singular marices.

Arguments:
  • ndarray (np.ndarray): A ndarray containing the covariance matrices, with some singular ones.
def CAMBres2dict(self, camb_results):
444    def CAMBres2dict(self, camb_results):
445        """Takes the CAMB result product from get_cmb_power_spectra and convert it to a dictionary with the proper keys.
446
447        Parameters:
448            camb_results (CAMBdata):
449                CAMB result product from the method get_cmb_power_spectra.
450        """
451        # Get the number of multipoles
452        ls = np.arange(camb_results["total"].shape[0], dtype=np.int64)
453        # Mapping between the CAMB keys and the ones we want
454        mapping = {"tt": 0, "ee": 1, "bb": 2, "te": 3, "et": 3}
455        # Initialize the output dictionary
456        res = {"ell": ls}
457        # Loop over the keys we want
458        for key, i in mapping.items():
459            # Save the results
460            res[key] = camb_results["total"][:, i]
461        # Check if we want the lensing potential
462        if "pp" in self.keys:
463            # Get the lensing potential
464            cl_lens = camb_results.get("lens_potential")
465            # Check if it exists
466            if cl_lens is not None:
467                # Save it
468                res["pp"] = cl_lens[:, 0].copy()
469                # Check if we want the cross terms
470                if "pt" in self.keys and "pe" in self.keys:
471                    # Loop over the cross terms
472                    for i, cross in enumerate(["pt", "pe"]):
473                        # Save the result
474                        res[cross] = cl_lens[:, i + 1].copy()
475                        # Save the symmetric term
476                        res[cross[::-1]] = res[cross]
477        return res

Takes the CAMB result product from get_cmb_power_spectra and convert it to a dictionary with the proper keys.

Arguments:
  • camb_results (CAMBdata): CAMB result product from the method get_cmb_power_spectra.
def txt2dict(self, txt, mapping=None, apply_ellfactor=None):
479    def txt2dict(self, txt, mapping=None, apply_ellfactor=None):
480        """Takes a txt file and convert it to a dictionary. This requires a way to map the columns to the keys. Also, it is possible to apply an ell factor to the Cls.
481
482        Parameters:
483            txt (str):
484                Path to txt file containing the spectra as columns.
485            mapping (dict):
486                Dictionary containing the mapping. Keywords will become the new keywords and values represent the index of the corresponding column.
487        """
488        # Define the ell values from the length of the txt file
489        assert (
490            mapping is not None
491        ), "You must provide a way to map the columns of your txt to the keys of a dictionary"
492        ls = np.arange(txt.shape[0], dtype=np.int64)
493        res = {"ell": ls}
494        # Loop over the mapping and extract the corresponding column from the txt file
495        # and store it in the dictionary under the corresponding keyword
496        for key, i in mapping.items():
497            if apply_ellfactor:
498                res[key] = txt[:, i] * ls * (ls + 1) / 2 / np.pi
499            else:
500                res[key] = txt[:, i]
501        return res

Takes a txt file and convert it to a dictionary. This requires a way to map the columns to the keys. Also, it is possible to apply an ell factor to the Cls.

Arguments:
  • txt (str): Path to txt file containing the spectra as columns.
  • mapping (dict): Dictionary containing the mapping. Keywords will become the new keywords and values represent the index of the corresponding column.
def prod_fidu(self):
503    def prod_fidu(self):
504        """Produce fiducial spectra or read the input ones.
505
506        If the user has not provided a Cl file, this function will produce the fiducial power spectra starting from the CAMB inifile for Planck2018. The extra keywords defined will maximize the accordance between the fiducial Cls and the ones obtained from Cobaya. If B-modes are requested, the tensor-to-scalar ratio and the spectral tilt will be set to the requested values. Note that if you do not provide a tilt, this will follow the standard single-field consistency relation. If instead you provide a custom file, stores that.
507        """
508        # If a custom file is provided, use that
509        if self.cl_file is not None:
510            # If the file is a pickle file, load it
511            if self.cl_file.endswith(".pkl"):
512                with open(self.cl_file, "rb") as pickle_file:
513                    res = pickle.load(pickle_file)
514            # Otherwise, load it as text file
515            else:
516                txt = np.loadtxt(self.cl_file)
517                mapping = {"tt": 0, "ee": 1, "bb": 2, "te": 3, "et": 3}
518                res = self.txt2dict(txt, mapping)
519            return res
520
521        try:
522            import camb
523        except ImportError:
524            print("CAMB seems to be not installed. Check the requirements.")
525
526        # Read the ini file containing the parameters for CAMB
527        path = os.path.dirname(os.path.abspath(__file__))
528        planck_path = os.path.join(path, "planck_2018.ini")
529        pars = camb.read_ini(planck_path)
530
531        if "bb" in self.keys:  # If we want to include the tensor mode
532            print(f"\nProducing fiducial spectra for r={self.r} and nt={self.nt}")
533            pars.InitPower.set_params(
534                As=2.100549e-9,
535                ns=0.9660499,
536                r=self.r,
537                nt=self.nt,
538                pivot_tensor=self.pivot_t,
539                pivot_scalar=0.05,
540                parameterization=2,
541            )
542            pars.WantTensors = True
543            pars.Accuracy.AccurateBB = True
544        pars.DoLensing = True
545        # _pars.Accuracy.AccuracyBoost = 2 # This helps getting an extra squeeze on the accordance of Cobaya and Fiducial spectra
546
547        if self.debug:
548            print(pars)
549
550        results = camb.get_results(pars)
551        res = results.get_cmb_power_spectra(
552            CMB_unit="muK",
553            lmax=self.lmax,
554            raw_cl=False,
555        )
556        return self.CAMBres2dict(res)

Produce fiducial spectra or read the input ones.

If the user has not provided a Cl file, this function will produce the fiducial power spectra starting from the CAMB inifile for Planck2018. The extra keywords defined will maximize the accordance between the fiducial Cls and the ones obtained from Cobaya. If B-modes are requested, the tensor-to-scalar ratio and the spectral tilt will be set to the requested values. Note that if you do not provide a tilt, this will follow the standard single-field consistency relation. If instead you provide a custom file, stores that.

def prod_noise(self):
558    def prod_noise(self):
559        """Produce noise power spectra or read the input ones.
560
561        If the user has not provided a noise file, this function will produce the noise power spectra for a given experiment with inverse noise weighting of white noise in each channel (TT, EE, BB). Note that you may want to have a look at the procedure since it is merely a place-holder. Indeed, you should provide a more realistic file from which to read the noise spectra, given that inverse noise weighting severely underestimates the amount of noise. If instead you provide the proper custom file, this method stores that.
562        """
563        # If the input noise file is a pickle file, load it.
564        if self.nl_file is not None:
565            if self.nl_file.endswith(".pkl"):
566                with open(self.nl_file, "rb") as pickle_file:
567                    res = pickle.load(pickle_file)
568            # If not, load the file as a text file
569            else:
570                _txt = np.loadtxt(self.nl_file)
571                # Convert the text file to a dictionary
572                res = self.txt2dict(_txt, self.mapping, apply_ellfactor=True)
573            return res
574
575        print(
576            "***WARNING***: the inverse noise weighting performed here severely underestimates \
577            the actual noise level of LiteBIRD. You should provide an input \
578            noise power spectrum with a more realistic noise."
579        )
580
581        try:
582            import yaml
583            from yaml.loader import SafeLoader
584            import healpy as hp
585        except ImportError:
586            print("YAML or Healpy seems to be not installed. Check the requirements.")
587
588        assert (
589            self.experiment is not None
590        ), "You must specify the experiment you want to consider"
591        print(f"\nComputing noise for {self.experiment}")
592
593        path = os.path.dirname(os.path.abspath(__file__))
594        experiments_path = os.path.join(path, "experiments.yaml")
595        with open(experiments_path) as f:
596            data = yaml.load(f, Loader=SafeLoader)
597
598        # Get the instrument data from the saved data
599        instrument = data[self.experiment]
600
601        # Get the FWHM values from the instrument data
602        fwhms = np.array(instrument["fwhm"])
603
604        # Get the frequency values from the instrument data
605        freqs = np.array(instrument["frequency"])
606
607        # Get the depth values from the instrument data
608        depth_p = np.array(instrument["depth_p"])
609        depth_i = np.array(instrument["depth_i"])
610
611        # Convert the depth to a pixel value
612        depth_p /= hp.nside2resol(self.nside, arcmin=True)
613        depth_i /= hp.nside2resol(self.nside, arcmin=True)
614        depth_p = depth_p * np.sqrt(
615            hp.pixelfunc.nside2pixarea(self.nside, degrees=False),
616        )
617        depth_i = depth_i * np.sqrt(
618            hp.pixelfunc.nside2pixarea(self.nside, degrees=False),
619        )
620
621        # Get the number of frequencies
622        n_freq = len(freqs)
623
624        # Define the ell values as a numpy array
625        ell = np.arange(0, self.lmax + 1, 1)
626
627        # Define the keys for the dictionary that will be returned
628        keys = ["tt", "ee", "bb"]
629
630        sigma = np.radians(fwhms / 60.0) / np.sqrt(8.0 * np.log(2.0))
631        sigma2 = sigma**2
632
633        # Calculate the Gaussian beam function
634        g = np.exp(ell * (ell + 1) * sigma2[:, np.newaxis])
635
636        # Calculate the polarization factor
637        pol_factor = np.array(
638            [np.zeros(sigma2.shape), 2 * sigma2, 2 * sigma2, sigma2],
639        )
640
641        # Calculate the polarization factor as a function of ell
642        pol_factor = np.exp(pol_factor)
643
644        # Calculate the Gaussian beam function for each polarization
645        G = []
646        for i, arr in enumerate(pol_factor):
647            G.append(g * arr[:, np.newaxis])
648        g = np.array(G)
649
650        # Initialize the dictionary that will be returned
651        res = {key: np.zeros((n_freq, self.lmax + 1)) for key in keys}
652
653        # Calculate the unnormalized power spectra
654        res["tt"] = 1 / (g[0, :, :] * depth_i[:, np.newaxis] ** 2)
655        res["ee"] = 1 / (g[3, :, :] * depth_p[:, np.newaxis] ** 2)
656        res["bb"] = 1 / (g[3, :, :] * depth_p[:, np.newaxis] ** 2)
657
658        # Calculate the normalized power spectra
659        res["tt"] = ell * (ell + 1) / (np.sum(res["tt"], axis=0)) / 2 / np.pi
660        res["ee"] = ell * (ell + 1) / (np.sum(res["ee"], axis=0)) / 2 / np.pi
661        res["bb"] = ell * (ell + 1) / (np.sum(res["bb"], axis=0)) / 2 / np.pi
662
663        res["tt"][:2] = [0, 0]
664        res["ee"][:2] = [0, 0]
665        res["bb"][:2] = [0, 0]
666
667        return res

Produce the noise power spectra or read them from the input file.

If the user has not provided a noise file, this method computes the noise power spectra for the given experiment by inverse-noise weighting the white noise of each channel (TT, EE, BB). Note that this procedure is merely a place-holder: inverse noise weighting severely underestimates the actual noise level, so you should provide a more realistic file from which to read the noise spectra. If you do provide a proper custom file, this method simply stores its content.
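
In formulas, each channel contributes a beam-deconvolved white-noise spectrum N_ell = depth^2 * exp[ell(ell+1)*sigma^2], and the channels are combined as 1/N_tot = sum_i 1/N_i. A minimal sketch of that combination (the helpers white_noise_nl and combine and the channel specs are illustrative, not part of LiLit, and the ell(ell+1)/2pi normalization applied in the source is omitted here):

    import numpy as np

    def white_noise_nl(depth_amin, fwhm_amin, lmax):
        # Beam-deconvolved white noise for one channel.
        # depth_amin: map depth in uK-arcmin; fwhm_amin: beam FWHM in arcmin.
        ell = np.arange(lmax + 1)
        depth = np.radians(depth_amin / 60.0)  # uK-arcmin -> uK-rad
        sigma = np.radians(fwhm_amin / 60.0) / np.sqrt(8.0 * np.log(2.0))
        return depth**2 * np.exp(ell * (ell + 1) * sigma**2)

    def combine(nls):
        # Inverse noise weighting: 1/N_tot(ell) = sum_i 1/N_i(ell)
        return 1.0 / np.sum([1.0 / nl for nl in nls], axis=0)

    # Made-up channel specs, for illustration only:
    nl_tot = combine([white_noise_nl(2.0, 70.0, 500), white_noise_nl(4.0, 30.0, 500)])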

def initialize(self):
669    def initialize(self):
670        """Initializes the fiducial spectra and the noise power spectra."""
671        # Compute the fiducial and noise power spectra
672        self.fiduCLS = self.prod_fidu()
673        self.noiseCLS = self.prod_noise()
674
675        # Compute the covariance matrices
676        self.fiduCOV = self.cov_filling(self.fiduCLS)
677        self.noiseCOV = self.cov_filling(self.noiseCLS)
678
679        # Print some information for debugging
680        if self.debug:
681            print(f"Keys of fiducial CLs ---> {self.fiduCLS.keys()}")
682            print(f"Keys of noise CLs ---> {self.noiseCLS.keys()}")
683
684            print("\nPrinting the first few values to check that it starts from 0...")
685            field = list(self.fiduCLS.keys())[0]
686            print(f"Fiducial CLs for {field.upper()} ---> {self.fiduCLS[field][0:5]}")
687            field = list(self.noiseCLS.keys())[0]
688            print(f"Noise CLs for {field.upper()} ---> {self.noiseCLS[field][0:5]}")
689
690        # Compute the total covariance matrix
691        self.data = (
692            self.fiduCOV[:, :, self.lmin : self.lmax + 1]
693            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
694        )
695
696        # Compute the inverse of the covariance matrix
697        if self.like == "gaussian":
698            self.gauss_keys = self.get_Gauss_keys()
699            sigma2 = self.sigma(self.gauss_keys, self.fiduCLS, self.noiseCLS)
700            self.sigma2 = self.inv_sigma(sigma2)

Initializes the fiducial spectra and the noise power spectra.
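
The slicing above implies that cov_filling returns an (n_fields, n_fields, lmax+1) array indexed as [field_i, field_j, ell] (a shape inferred from this snippet, not stated elsewhere), so the stored data block spans exactly the multipoles [lmin, lmax]. A toy check of the shapes, with placeholder arrays:

    import numpy as np

    lmin, lmax, n_fields = 2, 500, 3
    fiduCOV = np.zeros((n_fields, n_fields, lmax + 1))
    noiseCOV = np.zeros((n_fields, n_fields, lmax + 1))
    data = fiduCOV[:, :, lmin : lmax + 1] + noiseCOV[:, :, lmin : lmax + 1]
    print(data.shape)  # (3, 3, 499), i.e. lmax - lmin + 1 multipoles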

def get_requirements(self):
702    def get_requirements(self):
703        """Defines the requirements of the likelihood, specifying which quantities calculated by a theory code are needed. Note that you may want to change the overall keyword from 'Cl' to 'unlensed_Cl' if you want to work without considering lensing."""
704        # The likelihood needs the lensed CMB angular power spectra. The keyword can be set to "unlensed_Cl" to get the unlensed ones
705        requirements = {}
706        requirements["Cl"] = {cl: self.lmax for cl in self.keys}
707        # If debug is True, also request the CAMB results object and print the items required by the likelihood
708        if self.debug:
709            requirements["CAMBdata"] = None
710            print(
711                f"\nYou requested that Cobaya provides to the likelihood the following items: {requirements}",
712            )
713        return requirements

Defines the requirements of the likelihood, specifying which quantities calculated by a theory code are needed. Note that you may want to change the overall keyword from 'Cl' to 'unlensed_Cl' if you want to work without considering lensing.
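
Schematically, for a run with keys ['tt', 'te', 'ee'] and lmax = 1500 (assumed values, for illustration only), the returned dictionary would be:

    reqs = {"Cl": {"tt": 1500, "te": 1500, "ee": 1500}}

Cobaya then makes the corresponding lensed spectra available through provider.get_Cl(), as used in logp below.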

def data_vector(self, cov):
715    def data_vector(self, cov):
716        """Gets the data vector from the covariance matrix.
717
718        Extracts the data vector necessary for the Gaussian case. Note that null values are removed, since some entries may be zero when the fields have different values of lmax.
719
720        Parameters:
721            cov (np.ndarray):
722                A ndarray containing the covariance matrices, with some null ones.
723        """
724        return cov[np.triu_indices(self.n)][cov[np.triu_indices(self.n)] != 0]

Gets the data vector from the covariance matrix.

Extracts the data vector necessary for the Gaussian case. Note that null values are removed, since some entries may be zero when the fields have different values of lmax.

Arguments:
  • cov (np.ndarray): An ndarray containing the covariance matrices, some of which may be null.
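
For example, with three fields the upper triangle of each covariance is kept and the zeroed-out entries are dropped (made-up numbers):

    import numpy as np

    cov = np.array([[1.0, 2.0, 0.0],
                    [2.0, 3.0, 4.0],
                    [0.0, 4.0, 5.0]])
    vec = cov[np.triu_indices(3)]  # upper triangle: [1., 2., 0., 3., 4., 5.]
    vec = vec[vec != 0]            # null entries removed: [1., 2., 3., 4., 5.]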
def chi_exact(self, i=0):
726    def chi_exact(self, i=0):
727        """Computes the proper chi-square term for the exact likelihood case.
728
729        Parameters:
730            i (int, optional):
731                ell index if needed. Defaults to 0.
732        """
733        # If more than one field is considered, we work with the full
734        # field-field covariance matrix at each multipole, so the
735        # chi-square term involves matrix operations.
736        if self.n != 1:
737            # We extract the covariance and data matrices for the ith
738            # multipole.
739            coba = self.coba[:, :, i]
740            data = self.data[:, :, i]
741            det = np.linalg.det(coba)
742            # If the determinant is equal to 0, then we need to reduce
743            # the dimensionality of the data and covariance matrix.
744            if det == 0:
745                data = self.get_reduced_data(data)
746                coba = self.get_reduced_data(coba)
747            # We compute M = coba^{-1} @ data by solving a linear
748            # system, avoiding an explicit matrix inversion.
749            M = np.linalg.solve(coba, data)
750            # We compute the chi-square term from the trace of M, its
751            # log-determinant, and the number of fields.
752            return np.trace(M) - np.linalg.slogdet(M)[1] - data.shape[0]
753        # If only one field is considered, the covariance is a scalar
754        # at each multipole, so no matrix operations are needed and we
755        # can work with the full arrays at once.
756        else:
757            # We compute the scalar M = data / coba for every
758            # multipole.
759            M = self.data / self.coba
760            # We compute the chi-square term, which reduces to
761            # M - log|M| - 1 in the single-field case.
762            return M - np.log(np.abs(M)) - 1

Computes the proper chi-square term for the exact likelihood case.

Arguments:
  • i (int, optional): ell index if needed. Defaults to 0.
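
This is the exact full-sky expression chi2_ell = Tr(M) - ln det(M) - n with M = C_theory^{-1} C_data. A quick numerical sanity check with a made-up 2x2 matrix, confirming the term vanishes when the theory spectra equal the data:

    import numpy as np

    data = np.array([[5.0, 1.0],
                     [1.0, 4.0]])      # stand-in for fiducial + noise at one ell
    coba = data.copy()                 # theory exactly matching the data
    M = np.linalg.solve(coba, data)    # M = coba^{-1} @ data = identity
    chi2 = np.trace(M) - np.linalg.slogdet(M)[1] - data.shape[0]
    print(chi2)                        # 0.0 at the fiducial point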
def chi_gaussian(self, i=0):
764    def chi_gaussian(self, i=0):
765        """Computes the proper chi-square term for the Gaussian likelihood case.
766
767        Parameters:
768            i (int, optional):
769                ell index if needed. Defaults to 0.
770        """
771        # If we have more than one data vector
772        if self.n != 1:
773            coba = self.data_vector(self.coba[:, :, i])
774            data = self.data_vector(self.data[:, :, i])
775            return (coba - data) @ self.sigma2[i] @ (coba - data)
776        # If we have only one data vector
777        else:
778            coba = self.coba[0, 0, :]
779            data = self.data[0, 0, :]
780            res = (coba - data) * self.sigma2 * (coba - data)
781            return res

Computes the proper chi-square term for the Gaussian likelihood case.

Arguments:
  • i (int, optional): ell index if needed. Defaults to 0.
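
The Gaussian case is the usual quadratic form (x_theory - x_data)^T Sigma^{-1} (x_theory - x_data) evaluated at each multipole. In miniature, with made-up numbers and a stand-in identity matrix for the inverse covariance self.sigma2[i]:

    import numpy as np

    diff = np.array([0.1, -0.2, 0.05])  # coba - data for one multipole
    inv_sigma = np.eye(3)               # placeholder for the inverse covariance
    chi2 = diff @ inv_sigma @ diff
    print(chi2)                         # ~0.0525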
def compute_chi_part(self, i=0):
783    def compute_chi_part(self, i=0):
784        """Chooses which chi-square term to compute.
785
786        Parameters:
787            i (int, optional):
788                ell index if needed. Defaults to 0.
789        """
790        # check if the likelihood is "exact"
791        if self.like == "exact":
792            # if so, compute the chi-square term for the exact likelihood
793            return self.chi_exact(i)
794        # if not, check if it is "gaussian"
795        elif self.like == "gaussian":
796            # if so, compute the chi-square term for the gaussian likelihood
797            return self.chi_gaussian(i)
798        # if neither, raise an error, since silently returning None
799        # would break the likelihood evaluation downstream
800        else:
801            raise ValueError("You requested something different from 'exact' or 'gaussian'!")

Chooses which chi-square term to compute.

Arguments:
  • i (int, optional): ell index if needed. Defaults to 0.
def log_likelihood(self):
803    def log_likelihood(self):
804        """Computes the log likelihood."""
805        # Get the array of multipoles
806        ell = np.arange(self.lmin, self.lmax + 1, 1)
807        # Compute the log likelihood for each multipole
808        if self.n != 1:
809            logp_ℓ = np.zeros(ell.shape)
810            for i in range(0, self.lmax + 1 - self.lmin):
811                logp_ℓ[i] = -0.5 * (2 * ell[i] + 1) * self.compute_chi_part(i)
812        else:
813            logp_ℓ = -0.5 * (2 * ell + 1) * self.compute_chi_part()
814        # Sum the log likelihood over multipoles
815        return np.sum(logp_ℓ)

Computes the log likelihood.
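
In other words, log L = -1/2 * sum over ell from lmin to lmax of (2*ell + 1) * chi2(ell), the usual full-sky multiplicity weighting. A toy version of the single-field branch, with placeholder chi-square values:

    import numpy as np

    lmin, lmax = 2, 10
    ell = np.arange(lmin, lmax + 1)
    chi2 = np.full(ell.shape, 0.1)              # made-up per-ell chi-square terms
    logp = np.sum(-0.5 * (2 * ell + 1) * chi2)  # same weighting as log_likelihood()
    print(logp)                                 # ~ -5.85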

def logp(self, **params_values):
817    def logp(self, **params_values):
818        """Gets the log likelihood and passes it to Cobaya to carry on the MCMC process."""
819        if self.debug:
820            CAMBdata = self.provider.get_CAMBdata()
821            pars = CAMBdata.Params
822            print(pars)
823
824        # Get the Cls from Cobaya
825        self.cobaCLs = self.provider.get_Cl(ell_factor=True)
826
827        if self.debug:
828            print(f"Keys of Cobaya CLs ---> {self.cobaCLs.keys()}")
829
830            field = list(self.cobaCLs.keys())[0]
831            print("\nPrinting the first few values to check that it starts from 0...")
832            print(f"Cobaya CLs for {field.upper()} ---> {self.cobaCLs[field][0:5]}")
833
834        # Fill the covariance matrix with the Cls from Cobaya
835        self.cobaCOV = self.cov_filling(self.cobaCLs)
836
837        if self.debug:
838            ell = np.arange(0, self.lmax + 1, 1)
839            plt.loglog(ell, self.fiduCOV[0, 0, :], label="Fiducial CLs")
840            plt.loglog(ell, self.cobaCOV[0, 0, :], label="Cobaya CLs", ls="--")
841            plt.loglog(ell, self.noiseCOV[0, 0, :], label="Noise CLs")
842            plt.xlim(2, None)
843            plt.legend()
844            plt.show()
845
846        # Add the noise covariance to the covariance matrix filled with the Cls from Cobaya
847        self.coba = (
848            self.cobaCOV[:, :, self.lmin : self.lmax + 1]
849            + self.noiseCOV[:, :, self.lmin : self.lmax + 1]
850        )
851
852        # Compute the likelihood
853        logp = self.log_likelihood()
854
855        if self.debug:
856            print(logp)
857            exit()
858
859        return logp

Gets the log likelihood and passes it to Cobaya to carry on the MCMC process.
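
For orientation, a hypothetical end-to-end invocation with Cobaya might look as follows; the import path, field names, and parameter blocks are assumptions for illustration, so check the LiLit README for the actual interface:

    from cobaya.run import run
    from lilit import LiLit  # hypothetical import path

    info = {
        "likelihood": {
            # Pass an instance of the likelihood class documented above;
            # the constructor arguments follow the Parameters list at the top.
            "lilit_bb": LiLit(name="BB", fields=["b"], lmax=500,
                              experiment="LiteBIRD", nside=64, r=0.01),
        },
        "params": {"r": {"prior": {"min": 0.0, "max": 0.1}, "latex": "r"}},
        "theory": {"camb": None},
        "sampler": {"mcmc": None},
    }

    updated_info, sampler = run(info)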

Inherited Members
cobaya.likelihood.Likelihood
marginal
calculate
wait
cobaya.theory.Theory
must_provide
initialize_with_params
initialize_with_provider
get_param
get_result
get_can_provide_methods
get_can_provide
get_can_provide_params
get_can_support_params
get_allow_agnostic
input_params_extra
set_cache_size
check_cache_and_compute
get_current_derived
get_provider
get_helper_theories
update_for_helper_theories
get_attr_list_with_helpers
get_speed
set_measured_speed
cobaya.component.CobayaComponent
set_timing_on
get_name
close
set_instance_defaults
get_version
has_version
get_kind
compare_versions
cobaya.log.HasLogger
set_logger
is_debug
mpi_warning
mpi_info
mpi_debug
cobaya.component.HasDefaults
get_qualified_names
get_qualified_class_name
get_class_path
get_file_base_name
get_root_file_name
get_yaml_file
get_desc
get_bibtex
get_associated_file_content
get_class_options
get_defaults
get_annotations
cobaya.likelihood.LikelihoodInterface
current_logp