@article{16399,
  author    = {Olav Rongved and Markus Stige and Steven Hicks and Vajira Thambawita and Cise Midoglu and Evi Zouganeli and Dag Johansen and Michael Riegler and P{\aa}l Halvorsen},
  title     = {Automated Event Detection and Classification in Soccer: The Potential of Using Multiple Modalities},
  abstract  = {Detecting events in videos is a complex task, and many different approaches, aimed at a large variety of use cases, have been proposed in the literature. Most approaches, however, are unimodal and consider only the visual information in the videos. This paper presents and evaluates different neural-network-based approaches that combine visual features with audio features to detect (spot) and classify events in soccer videos. We employ model fusion to combine different modalities such as video and audio, and test these combinations against different state-of-the-art models on the SoccerNet dataset. The results show that a multimodal approach is beneficial. We also analyze how the tolerance for delays in classification and spotting time, and the tolerance for prediction accuracy, influence the results. Our experiments show that using multiple modalities improves event detection performance for certain types of events.},
  year      = {2021},
  month     = dec,
  journal   = {Machine Learning and Knowledge Extraction},
  volume    = {3},
  pages     = {1030--1054},
  publisher = {MDPI},
  url       = {https://www.mdpi.com/2504-4990/3/4/51/pdf},
  doi       = {10.3390/make3040051},
}