feat(distributions): add lognakagami, loggamma, and kl_statistic
Implement two new scipy-compatible distributions: Log-Nakagami (lognakagami) and Log-Gamma (loggamma_dist), with complete logpdf/cdf/ppf/stats/entropy/rvs methods derived from the change-of-variable Y = ln(X). Add kl_statistic, a KDE-based KL-divergence goodness-of-fit callable compatible with the Fitter class. Extend k_gen with _stats (improving speed), _cdf, and a fit guard, and switch kv → kve to improve numerical stability at large arguments. Add unit tests for all three additions covering normalization, monotonicity, ppf inversion, moment formulas, and Fitter integration.
This commit is contained in:
@@ -6,7 +6,7 @@ import os
|
||||
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
|
||||
|
||||
from tools.statistics import aic_statistic, bic_statistic
|
||||
from tools.statistics import aic_statistic, bic_statistic, kl_statistic
|
||||
from fitting.fitter import Fitter
|
||||
|
||||
|
||||
@@ -208,3 +208,129 @@ class TestBicStatisticInFitter:
|
||||
f.validate(n_mc_samples=99)
|
||||
assert f["gamma"].test_result is not None
|
||||
assert f["expon"].test_result is not None
|
||||
|
||||
|
||||
# ── kl_statistic unit tests ───────────────────────────────────────────────────
|
||||
|
||||
|
||||
class TestKlStatistic:
    """Unit tests for kl_statistic, the KDE-based KL-divergence GoF statistic."""

    def _fitted_dist(self, dist, data, **kwargs):
        """Return a frozen distribution fitted to data."""
        params = dist.fit(data, **kwargs)
        return dist(*params)

    def _gamma_frozen(self):
        """Frozen gamma fitted to GAMMA_DATA with loc pinned at 0 (shared fixture)."""
        return self._fitted_dist(gamma, GAMMA_DATA, floc=0)

    def test_returns_float(self):
        """kl_statistic must return a numeric scalar."""
        result = kl_statistic(self._gamma_frozen(), GAMMA_DATA, axis=None)
        # NOTE(review): the previous `isinstance(float(result), float)` was a
        # tautology — float(x) is always a float whenever the call succeeds.
        # Assert scalar-ness directly; float() still raises for non-numerics.
        assert np.ndim(result) == 0
        float(result)  # must be convertible to a plain Python float

    def test_returns_real_value(self):
        """kl_statistic returns a real number (KDE finite-sum approximation can be negative)."""
        result = kl_statistic(self._gamma_frozen(), GAMMA_DATA, axis=None)
        assert np.isreal(result)

    def test_result_is_finite(self):
        """kl_statistic must return a finite value for valid input."""
        assert np.isfinite(kl_statistic(self._gamma_frozen(), GAMMA_DATA, axis=None))

    def test_works_with_axis_zero(self):
        """kl_statistic must return a finite value when axis=0."""
        result = kl_statistic(self._gamma_frozen(), GAMMA_DATA, axis=0)
        assert np.isfinite(result)

    def test_axis_zero_same_as_axis_none_for_1d(self):
        """For 1-D data, axis=0 and axis=None must return the same value."""
        frozen = self._gamma_frozen()
        result_none = kl_statistic(frozen, GAMMA_DATA, axis=None)
        result_axis0 = kl_statistic(frozen, GAMMA_DATA, axis=0)
        assert pytest.approx(result_none) == result_axis0

    def test_better_fit_has_lower_kl(self):
        """Gamma fitted to gamma data should have lower KL than normal fitted to gamma data."""
        gamma_frozen = self._gamma_frozen()
        norm_frozen = self._fitted_dist(norm, GAMMA_DATA)
        kl_gamma = kl_statistic(gamma_frozen, GAMMA_DATA, axis=None)
        kl_norm = kl_statistic(norm_frozen, GAMMA_DATA, axis=None)
        assert kl_gamma < kl_norm

    def test_matches_manual_formula(self):
        """kl_statistic result must match the KDE-based KL formula computed manually."""
        from scipy.stats import gaussian_kde

        frozen = self._gamma_frozen()
        kde = gaussian_kde(GAMMA_DATA)
        data_pdf = kde(GAMMA_DATA)
        dist_pdf = frozen.pdf(GAMMA_DATA)
        # epsilon mirrors the guard kl_statistic applies against log(0) / div-by-0
        epsilon = 1e-10
        expected = np.sum(
            data_pdf * np.log((data_pdf + epsilon) / (dist_pdf + epsilon))
        )
        assert pytest.approx(kl_statistic(frozen, GAMMA_DATA, axis=None)) == expected

    def test_no_nan_when_dist_pdf_near_zero(self):
        """epsilon guard must prevent NaN when dist PDF is effectively zero over data."""
        # expon with large loc has near-zero PDF over positive-skewed gamma data
        far_dist = expon(loc=100.0, scale=1.0)
        result = kl_statistic(far_dist, GAMMA_DATA, axis=None)
        assert not np.isnan(result)

    def test_result_is_consistent_across_calls(self):
        """Two calls with identical inputs must return the same value."""
        frozen = self._gamma_frozen()
        r1 = kl_statistic(frozen, GAMMA_DATA, axis=None)
        r2 = kl_statistic(frozen, GAMMA_DATA, axis=None)
        assert r1 == r2
|
||||
|
||||
|
||||
# ── Integration: kl_statistic as callable in Fitter ──────────────────────────
|
||||
|
||||
|
||||
class TestKlStatisticInFitter:
    """Integration tests: kl_statistic passed as a callable statistic to Fitter."""

    def _validated_fitter(self, dists, **dist_params):
        """Build a Fitter using kl_statistic, fit GAMMA_DATA, and run validate."""
        f = Fitter(dists, statistic_method=kl_statistic, **dist_params)
        f.fit(GAMMA_DATA)
        f.validate(n_mc_samples=99)
        return f

    def test_fitter_accepts_kl_callable(self):
        f = self._validated_fitter([gamma], gamma_params={"floc": 0})
        assert f["gamma"].test_result is not None

    def test_fitter_kl_statistic_is_finite(self):
        f = self._validated_fitter([gamma], gamma_params={"floc": 0})
        assert np.isfinite(f["gamma"].gof_statistic)

    def test_fitter_kl_pvalue_in_range(self):
        f = self._validated_fitter([gamma], gamma_params={"floc": 0})
        pval = f["gamma"].pvalue
        assert 0.0 <= pval <= 1.0

    def test_fitter_kl_vs_ad_different_statistic_values(self):
        """KL and AD statistics should differ numerically."""
        f_kl = self._validated_fitter([gamma], gamma_params={"floc": 0})
        # AD uses the string method name, so it cannot go through the helper.
        f_ad = Fitter([gamma], statistic_method="ad", gamma_params={"floc": 0})
        f_ad.fit(GAMMA_DATA)
        f_ad.validate(n_mc_samples=99)
        assert f_kl["gamma"].gof_statistic != pytest.approx(
            f_ad["gamma"].gof_statistic
        )

    def test_fitter_kl_multiple_distributions(self):
        f = self._validated_fitter(
            [gamma, expon],
            gamma_params={"floc": 0},
            expon_params={"floc": 0},
        )
        assert f["gamma"].test_result is not None
        assert f["expon"].test_result is not None
|
||||
|
||||
Reference in New Issue
Block a user