@misc{17111,
  keywords = {Explainable artificial intelligence, concept-based explanations, diabetic retinopathy},
  author   = {Andrea Stor{\aa}s and Josefine Sundgaard},
  title    = {Concept Explanations for Deep Learning-Based Diabetic Retinopathy Diagnosis},
  abstract = {Diabetic retinopathy (DR) is a common complication of diabetes that damages the eye and can ultimately lead to blindness. The severity of DR and the choice of treatment depend on the presence of medical findings in fundus images. Much work has been done on developing complex machine learning (ML) models to automatically diagnose DR from fundus images. However, their high level of complexity increases the need for techniques that improve human understanding of the ML models. Explainable artificial intelligence (XAI) methods can detect weaknesses in ML models and increase trust among end users. In the medical field, explaining ML models is crucial for their clinical adoption. While a plethora of XAI methods exists, heatmaps are the technique typically applied to explain ML models for DR diagnosis. Heatmaps highlight the image areas the model regards as important when making a prediction. Despite their popularity, heatmaps can be less appropriate in the medical field. Testing with Concept Activation Vectors (TCAV), which provides explanations based on human-friendly concepts, can be a more suitable alternative for explaining DR diagnosis models, but it has not been thoroughly investigated for DR models. We develop a deep neural network for diagnosing DR from fundus images and apply TCAV to explain the resulting model. Concept generation with and without masking is compared. Based on diagnostic criteria for DR, we evaluate the model{\textquoteright}s concept ranking for different severity levels of DR. TCAV can explain individual images, giving insight into a specific case, or an entire class, evaluating overall consistency with diagnostic standards. The most important concepts for the DR model agree with diagnostic criteria for DR. No large differences are detected between the two concept generation approaches. TCAV is a flexible explanation method in which human-friendly concepts provide insight into, and trust in, ML models for medical image analysis, and it shows promising results for DR grading.},
  year     = {2023},
  journal  = {Nordic AI Meet 2023},
}