@inproceedings{17225,
  keywords  = {Explainable artificial intelligence, concept-based explanations, diabetic retinopathy},
  author    = {Andrea Stor{\r a}s and Josefine Sundgaard},
  title     = {Looking into Concept Explanation Methods for Diabetic Retinopathy Classification},
  abstract  = {Diabetic retinopathy is a common complication of diabetes, and monitoring the progression of retinal abnormalities using fundus imaging is crucial. Because the images must be interpreted by a medical expert, it is infeasible to screen all individuals with diabetes for diabetic retinopathy. Deep learning has shown impressive results for the automatic analysis and grading of fundus images. One drawback, however, is the lack of interpretability, which hampers the implementation of such systems in the clinic. Explainable artificial intelligence methods can be applied to explain such deep neural networks. Concept-based explanations have been shown to be intuitive for humans to understand, but they have not yet been explored in detail for diabetic retinopathy grading. This work investigates and compares two concept-based explanation techniques for explaining deep neural networks developed for automatic diagnosis of diabetic retinopathy: Quantitative Testing with Concept Activation Vectors and Concept Bottleneck Models. We found that both methods have strengths and weaknesses, and the choice of method should take the available data and the end user{\textquoteright}s preferences into account.},
  year      = {2023},
  booktitle = {Workshop on Interpretability of Machine Intelligence in Medical Image Computing at MICCAI 2023},
}