@inproceedings{Kluettermann2024PAKDD,
  author    = {Kl{\"u}ttermann, Simon and Balestra, Chiara and M{\"u}ller, Emmanuel},
  editor    = {Yang, De-Nian and Xie, Xing and Tseng, Vincent S. and Pei, Jian and Huang, Jen-Wei and Lin, Jerry Chun-Wei},
  title     = {On the Efficient Explanation of Outlier Detection Ensembles Through {Shapley} Values},
  booktitle = {Advances in Knowledge Discovery and Data Mining},
  year      = {2024},
  publisher = {Springer Nature Singapore},
  address   = {Singapore},
  pages     = {43--55},
  abstract  = {Feature bagging models have revealed their practical usability in various contexts, among them in outlier detection, where they build ensembles to reliably assign outlier scores to data samples. However, the interpretability of so-obtained outlier detection methods is far from achieved. Among the standard black-box models interpretability approaches, we find Shapley values that clarify the roles of single inputs. However, Shapley values are characterized by high computational runtimes that make them useful in pretty low-dimensional applications. We propose bagged Shapley values, a method to achieve interpretability of feature bagging ensembles, especially for outlier detection. The method not only assigns local importance scores to each feature of the initial space, helping to increase the interpretability but also solves the computational issue; specifically, the bagged Shapley values can be exactly computed in polynomial time.},
  isbn      = {978-981-97-2259-4},
}