@ARTICLE{10.3389/frobt.2022.1090012,
  AUTHOR   = {Safieh, Ali A. and Alhaol, Ibrahim Abu and Ghnemat, Rawan},
  TITLE    = {End-to-end Jordanian dialect speech-to-text self-supervised learning framework},
  JOURNAL  = {Frontiers in Robotics and AI},
  VOLUME   = {9},
  YEAR     = {2022},
  URL      = {https://www.frontiersin.org/articles/10.3389/frobt.2022.1090012},
  DOI      = {10.3389/frobt.2022.1090012},
  ISSN     = {2296-9144},
  ABSTRACT = {Speech-to-text engines are extremely needed nowadays for different applications, representing an essential enabler in human–robot interaction. Still, some languages suffer from the lack of labeled speech data, especially in the Arabic dialects or any low-resource languages. The need for a self-supervised training process and self-training using noisy training is proven to be one of the up-and-coming feasible solutions. This article proposes an end-to-end, transformers-based model with a framework for low-resource languages. In addition, the framework incorporates customized audio-to-text processing algorithms to achieve a highly efficient Jordanian Arabic dialect speech-to-text system. The proposed framework enables ingesting data from many sources, making the ground truth from external sources possible by speeding up the manual annotation process. The framework allows the training process using noisy student training and self-supervised learning to utilize the unlabeled data in both pre- and post-training stages and incorporate multiple types of data augmentation. The proposed self-training approach outperforms the fine-tuned Wav2Vec model by 5% in terms of word error rate reduction. The outcome of this work provides the research community with a Jordanian-spoken data set along with an end-to-end approach to deal with low-resource languages. This is done by utilizing the power of the pretraining, post-training, and injecting noisy labeled and augmented data with minimal human intervention. It enables the development of new applications in the field of Arabic language speech-to-text area like the question-answering systems and intelligent control systems, and it will add human-like perception and hearing sensors to intelligent robots.}
}