@article{GeneralizedBhattacharyyaChernoff-2014,
  title    = {Generalized {B}hattacharyya and {C}hernoff upper bounds on {B}ayes error using quasi-arithmetic means},
  author   = "Frank Nielsen",
  journal  = "Pattern Recognition Letters",
  volume   = "42",
  pages    = "25--34",
  year     = "2014",
  issn     = "0167-8655",
  doi      = "10.1016/j.patrec.2014.01.002",
  url      = "http://www.sciencedirect.com/science/article/pii/S0167865514000166",
  keywords = "Affinity coefficient, Divergence, Chernoff information, Bhattacharyya distance, Total variation distance, Quasi-arithmetic means",
  abstract = "Bayesian classification labels observations based on given prior information, namely class-a priori and class-conditional probabilities. Bayes' risk is the minimum expected classification cost, achieved by the Bayes' test, the optimal decision rule. When no cost is incurred for correct classification and unit cost is charged for misclassification, the Bayes' test reduces to the maximum a posteriori decision rule, and Bayes' risk simplifies to Bayes' error, the probability of error. Since calculating this probability of error is often intractable, several techniques have been devised to bound it with closed-form formulas, thereby introducing measures of similarity and divergence between distributions such as the Bhattacharyya coefficient and its associated Bhattacharyya distance. The Bhattacharyya upper bound can be further tightened using the Chernoff information, which relies on the notion of the best error exponent. In this paper, we first express Bayes' risk using the total variation distance on scaled distributions. We then elucidate and extend the Bhattacharyya and Chernoff upper bound mechanisms using generalized weighted means. As a byproduct, we provide novel notions of statistical divergences and affinity coefficients. We illustrate our technique by deriving new upper bounds for the univariate Cauchy and the multivariate t-distributions, and show experimentally that those bounds are not too distant from the computationally intractable Bayes' error."
}