@comment{Auto-exported entry cleaned: authors normalised to "Last, First";
  acronyms brace-protected against style recasing; title stored in Title Case;
  stray leading space in series removed; brace delimiters and aligned fields.
  Citation key left unchanged so existing \cite commands still resolve.
  NOTE(review): no pages/eid field in the export -- add if known.}
@inproceedings{193ab53ea7984cc6a717ba9e8be6ebdb,
  title     = {{WAV2VEC}-Based Detection and Severity Level Classification of Dysarthria from Speech},
  author    = {Javanmardi, Farhad and Tirronen, Saska and Kodali, Manila and Kadiri, Sudarsana and Alku, Paavo},
  booktitle = {Proceedings of the {IEEE} International Conference on Acoustics, Speech, and Signal Processing ({ICASSP}{\textquoteright}23)},
  series    = {Proceedings of the {IEEE} International Conference on Acoustics, Speech, and Signal Processing},
  publisher = {IEEE},
  address   = {United States},
  year      = {2023},
  doi       = {10.1109/ICASSP49357.2023.10094857},
  language  = {English},
  abstract  = {Automatic detection and severity level classification of dysarthria directly from acoustic speech signals can be used as a tool in medical diagnosis. In this work, the pre-trained wav2vec 2.0 model is studied as a feature extractor to build detection and severity level classification systems for dysarthric speech. The experiments were carried out with the popularly used UA-speech database. In the detection experiments, the results revealed that the best performance was obtained using the embeddings from the first layer of the wav2vec model that yielded an absolute improvement of 1.23% in accuracy compared to the best performing baseline feature (spectrogram). In the studied severity level classification task, the results revealed that the embeddings from the final layer gave an absolute improvement of 10.62% in accuracy compared to the best baseline features (mel-frequency cepstral coefficients).},
  note      = {IEEE International Conference on Acoustics, Speech, and Signal Processing, ICASSP ; Conference date: 04-06-2023 Through 10-06-2023},
}