@inproceedings{15095,
  author    = {Ionescu, Bogdan and M{\"u}ller, Henning and Villegas, Mauricio and de Herrera, Alba and Eickhoff, Carsten and Andrearczyk, Vincent and Cid, Yashin and Liauchuk, Vitali and Kovalev, Vassili and Hasan, Sadid and Ling, Yan and Farri, Oladimeji and Liu, Joey and Lungren, Matthew and Dang-Nguyen, Duc-Tien and Prias, Luca and Riegler, Michael and Zhou, Liting and Lux, Mathias and Gurrin, Cathal},
  editor    = {Bellot, Patrice},
  title     = {Overview of {ImageCLEF} 2018: Challenges, Datasets and Evaluation},
  booktitle = {Experimental {IR} Meets Multilinguality, Multimodality, and Interaction ({CLEF} 2018)},
  series    = {Lecture Notes in Computer Science},
  volume    = {11018},
  publisher = {Springer},
  year      = {2018},
  month     = aug,
  doi       = {10.1007/978-3-319-98932-7_28},
  url       = {https://link.springer.com/chapter/10.1007/978-3-319-98932-7_28},
  internal-note = {series/volume inferred from ISBN 978-3-319-98932-7 (LNCS 11018); verify pages against the published chapter. Some author-name spellings in the original export look garbled (e.g. "Prias", "Cid") -- verify against the paper.},
  abstract  = {This paper presents an overview of the ImageCLEF 2018 evaluation campaign, an event that was organized as part of the CLEF (Conference and Labs of the Evaluation Forum) Labs 2018. ImageCLEF is an ongoing initiative (it started in 2003) that promotes the evaluation of technologies for annotation, indexing and retrieval with the aim of providing information access to collections of images in various usage scenarios and domains. In 2018, the 16th edition of ImageCLEF ran three main tasks and a pilot task: (1) a caption prediction task that aims at predicting the caption of a figure from the biomedical literature based only on the figure image; (2) a tuberculosis task that aims at detecting the tuberculosis type, severity and drug resistance from CT (Computed Tomography) volumes of the lung; (3) a LifeLog task (videos, images and other sources) about daily activities understanding and moment retrieval, and (4) a pilot task on visual question answering where systems are tasked with answering medical questions. The strong participation, with over 100 research groups registering and 31 submitting results for the tasks, shows an increasing interest in this benchmarking campaign.},
}