2025
Leonarczyk, Ricardo; Regio, Murilo; Andrade, Cristiano; Garcia, Luan; Griebler, Dalvan; Oliveira, Ewerton; Paula, Thomas: Proposta de Arcabouço Teórico para a Avaliação Sistemática de Modelos de Linguagem Quantizados. Inproceedings. In: Anais da I Escola Regional de Aprendizado de Máquina e Inteligência Artificial da Região Sul, pp. 37-40, SBC, Porto Alegre, Brazil, 2025.
@inproceedings{LEONARCZYK:ERAMIA:25,
title = {Proposta de Arcabouço Teórico para a Avaliação Sistemática de Modelos de Linguagem Quantizados},
author = {Ricardo Leonarczyk and Murilo Regio and Cristiano Andrade and Luan Garcia and Dalvan Griebler and Ewerton Oliveira and Thomas Paula},
url = {https://doi.org/10.5753/eramiars.2025.16626},
doi = {10.5753/eramiars.2025.16626},
year = {2025},
date = {2025-11-01},
booktitle = {Anais da I Escola Regional de Aprendizado de Máquina e Inteligência Artificial da Região Sul},
pages = {37-40},
address = {Porto Alegre, Brazil},
organization = {SBC},
abstract = {Este artigo propõe um arcabouço conceitual para sistematizar a avaliação de modelos de linguagem quantizados, organizando-a em quatro níveis hierárquicos de escopo e rigor crescentes. Ele é fundamentado em uma revisão sistemática da literatura sobre quantização pós-treinamento, cujos resultados também orientam a seleção de métricas e benchmarks apropriados para cada nível. Ao estabelecer uma metodologia estruturada e baseada em evidências, o arcabouço visa aprimorar a transparência, a reprodutibilidade e a comparabilidade da pesquisa na área.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Guder, Larissa; Dopke, Luan; Kaiser, Marcos; Griebler, Dalvan; Meneguzzi, Felipe: BAH: Beyond Acoustic Handcrafted features for speech emotion recognition in Portuguese. Inproceedings. In: Proceedings of the 31st Brazilian Symposium on Multimedia and the Web, pp. 86-93, SBC, Rio de Janeiro, Brazil, 2025.
@inproceedings{GUDER:WebMedia:25,
title = {BAH: Beyond Acoustic Handcrafted features for speech emotion recognition in Portuguese},
author = {Larissa Guder and Luan Dopke and Marcos Kaiser and Dalvan Griebler and Felipe Meneguzzi},
url = {https://doi.org/10.5753/webmedia.2025.16129},
doi = {10.5753/webmedia.2025.16129},
year = {2025},
date = {2025-11-01},
booktitle = {Proceedings of the 31st Brazilian Symposium on Multimedia and the Web},
pages = {86-93},
address = {Rio de Janeiro, Brazil},
organization = {SBC},
abstract = {Affective computing integrates human feelings with computing applications. One affective computing task is Speech Emotion Recognition (SER), which identifies emotions from spoken audio. Even though emotion is a universal aspect of human experience, each culture and language has different ways to express and understand emotions, so SER models are commonly designed for a single language. In this work, we explore VERBO, a Brazilian Portuguese dataset for categorical emotion recognition. Our main objective is to define the best way to extract acoustic features to train a classifier for SER. We compare 18 different methods for generating audio representations, grouped into handcrafted features and audio embeddings. The best representation for VERBO is TRILL embeddings: combined with an SVM classifier, it achieved 92% accuracy. As far as we know, this is the state of the art for this dataset.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dopke, Luan; Accorsi, Arthur; Aires, João; Guder, Larissa; Manssour, Isabel; Griebler, Dalvan: SpeechVis: Simplifying Speech Emotion Visualization. Inproceedings. In: Proceedings of the 31st Brazilian Symposium on Multimedia and the Web, pp. 428-436, SBC, Rio de Janeiro, Brazil, 2025.
@inproceedings{DOPKE:WebMedia:25,
title = {SpeechVis: Simplifying Speech Emotion Visualization },
author = {Luan Dopke and Arthur Accorsi and João Aires and Larissa Guder and Isabel Manssour and Dalvan Griebler},
url = {https://doi.org/10.5753/webmedia.2025.16115},
doi = {10.5753/webmedia.2025.16115},
year = {2025},
date = {2025-11-01},
booktitle = {Proceedings of the 31st Brazilian Symposium on Multimedia and the Web},
pages = {428-436},
address = {Rio de Janeiro, Brazil},
organization = {SBC},
abstract = {As the amount of online content increases, analyzing and following discussions becomes harder. Grasping relevant information, such as the main discussion topics and the emotions expressed in audio, e.g., in a podcast, requires people to watch or listen to the entire content. However, this can take a long time, and people’s interpretations of emotions can bias their understanding. A visual summarization of such information can help people quickly understand the audio context and analyze the content regarding speakers, their emotions, and the main topics covered. In this work, we introduce SpeechVis, a visual analytics tool that visually summarizes speech emotions from an audio source. SpeechVis extracts multiple types of information from the audio, such as the transcription, speakers, main topics, and emotions, to provide visualizations and statistics about the discussed topics and each speaker’s emotions. We used multiple off-the-shelf machine learning models to extract audio information and developed several visual representations that aim to facilitate audio analysis. To evaluate SpeechVis, we selected two use cases and performed an analysis to demonstrate how the SpeechVis visualizations can give valuable insights and facilitate audio interpretation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Guder, Larissa; Griebler, Dalvan; Meneguzzi, Felipe: Impacto do Ajuste Fino na Redução de Dimensionalidade para Reconhecimento Multimodal de Emoções na Fala. Inproceedings. In: Anais da I Escola Regional de Aprendizado de Máquina e Inteligência Artificial da Região Sul, pp. 61-64, SBC, Porto Alegre, Brazil, 2025.
@inproceedings{GUDER:ERAMIA:25,
title = {Impacto do Ajuste Fino na Redução de Dimensionalidade para Reconhecimento Multimodal de Emoções na Fala },
author = {Larissa Guder and Dalvan Griebler and Felipe Meneguzzi},
url = {https://doi.org/10.5753/eramiars.2025.16644},
doi = {10.5753/eramiars.2025.16644},
year = {2025},
date = {2025-11-01},
booktitle = {Anais da I Escola Regional de Aprendizado de Máquina e Inteligência Artificial da Região Sul},
pages = {61-64},
address = {Porto Alegre, Brazil},
organization = {SBC},
abstract = {O objetivo desse trabalho é avaliar o impacto do ajuste fino na redução da dimensionalidade do embedding de sentença MiniLM L3, para a tarefa de reconhecimento dimensional de emoções na fala, através de uma abordagem bimodal que combina informações acústicas e textuais. O ajuste fino resultou em um aumento de 3x no Coeficiente de Correlação de Concordância para a dimensão de valência.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
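For reference, the Concordance Correlation Coefficient reported in this abstract is the standard agreement metric for dimensional (valence/arousal) emotion recognition. A minimal LaTeX statement of its usual definition, where $\rho$ is the Pearson correlation and $\mu$, $\sigma^2$ are the means and variances of predictions $x$ and labels $y$ (this definition is standard background, not taken from the paper):

```latex
% Concordance Correlation Coefficient (CCC) in its standard form
\rho_c = \frac{2 \rho \sigma_x \sigma_y}{\sigma_x^2 + \sigma_y^2 + (\mu_x - \mu_y)^2}
```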
Madeira, Caio; Magnaguagno, Maurício; Griebler, Dalvan: Language Models are the new Doom. Inproceedings. In: Anais da I Escola Regional de Aprendizado de Máquina e Inteligência Artificial da Região Sul, pp. 416-419, SBC, Porto Alegre, Brazil, 2025.
@inproceedings{MADEIRA:ERAMIA:25,
title = {Language Models are the new Doom },
author = {Caio Madeira and Maurício Magnaguagno and Dalvan Griebler},
url = {https://doi.org/10.5753/eramiars.2025.16777},
doi = {10.5753/eramiars.2025.16777},
year = {2025},
date = {2025-11-01},
booktitle = {Anais da I Escola Regional de Aprendizado de Máquina e Inteligência Artificial da Região Sul},
pages = {416-419},
address = {Porto Alegre, Brazil},
organization = {SBC},
abstract = {As more devices include a display, it becomes more likely that they can run Doom. Various techniques are used to limit the processing required by the game. A parallel can be drawn with small language models, which are being ported to a wide variety of devices, including legacy ones. While less capable than their large-scale counterparts, these models retain the essence of natural language processing. In this work, we map parallels between Doom and language models, focusing on a TinyLlama model trained on the TinyStories dataset and ported to the PlayStation Portable, with hyper-parameter value experimentation.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Munhoz, Pedro; Cavazzotto, Guilherme; Guder, Larissa; Dopke, Luan; Griebler, Dalvan: Avaliação do uso de embeddings para o reconhecimento de emoções na fala. Inproceedings. In: Anais da I Escola Regional de Aprendizado de Máquina e Inteligência Artificial da Região Sul, pp. 268-271, SBC, Porto Alegre, Brazil, 2025.
@inproceedings{MUNHOZ:ERAMIA:25,
title = {Avaliação do uso de embeddings para o reconhecimento de emoções na fala},
author = {Pedro Munhoz and Guilherme Cavazzotto and Larissa Guder and Luan Dopke and Dalvan Griebler},
url = {https://doi.org/10.5753/eramiars.2025.16638},
doi = {10.5753/eramiars.2025.16638},
year = {2025},
date = {2025-11-01},
booktitle = {Anais da I Escola Regional de Aprendizado de Máquina e Inteligência Artificial da Região Sul},
pages = {268-271},
address = {Porto Alegre, Brazil},
organization = {SBC},
abstract = {O presente artigo tem como objetivo avaliar o uso de métodos de extração de atributos baseados em embeddings para a tarefa de reconhecimento de emoções. Para isso, foi utilizado o conjunto de dados IEMOCAP e 9 modelos classificadores foram treinados e testados com 11 conjuntos de atributos diferentes. Como resultado, foi observado que o modelo trillsson5 de extração de atributos resultou na combinação de melhor acurácia, o que sugere que os modelos baseados em embeddings podem superar os demais na tarefa.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Dopke, Luan; Aires, João; Lira, Juliana; Hübner, Lilian; Griebler, Dalvan: Detecção da Fala Característica da Doença de Alzheimer Utilizando uma Abordagem Agnóstica de Idioma. Inproceedings. In: Anais da I Escola Regional de Aprendizado de Máquina e Inteligência Artificial da Região Sul, pp. 96-99, SBC, Porto Alegre, Brazil, 2025.
@inproceedings{DOPKE:ERAMIA:25,
title = {Detecção da Fala Característica da Doença de Alzheimer Utilizando uma Abordagem Agnóstica de Idioma },
author = {Luan Dopke and João Aires and Juliana Lira and Lilian Hübner and Dalvan Griebler},
url = {https://doi.org/10.5753/eramiars.2025.16670},
doi = {10.5753/eramiars.2025.16670},
year = {2025},
date = {2025-11-01},
booktitle = {Anais da I Escola Regional de Aprendizado de Máquina e Inteligência Artificial da Região Sul},
pages = {96-99},
address = {Porto Alegre, Brazil},
organization = {SBC},
abstract = {Este trabalho propõe uma abordagem de classificação agnóstica ao idioma para a detecção da doença de Alzheimer a partir de características da fala. Para isto são extraídas características acústicas e textuais de dados em inglês e português brasileiro para treinar modelos de Aprendizado de Máquina. Os resultados revelaram que os índices lexicais e o conjunto ComParE contribuem para a generalização do modelo entre os idiomas.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Martins, Eduardo; Hoffmann, Renato; Alf, Lucas; Griebler, Dalvan: Interface para Programação de Pipelines Lineares Tolerantes a Falha para MPI Padrão C++. Inproceedings. In: Anais do XXVI Simpósio em Sistemas Computacionais de Alto Desempenho, pp. 133-144, SBC, Bonito, Brazil, 2025.
@inproceedings{MARTINS:SSCAD:25,
title = {Interface para Programação de Pipelines Lineares Tolerantes a Falha para MPI Padrão C++},
author = {Eduardo Martins and Renato Hoffmann and Lucas Alf and Dalvan Griebler},
url = {https://doi.org/10.5753/sscad.2025.15867},
doi = {10.5753/sscad.2025.15867},
year = {2025},
date = {2025-10-01},
booktitle = {Anais do XXVI Simpósio em Sistemas Computacionais de Alto Desempenho},
pages = {133-144},
publisher = {SBC},
address = {Bonito, Brazil},
series = {SSCAD'25},
abstract = {Sistemas de processamento de stream são projetados para operar continuamente e devem ser capazes de se recuperar em caso de falhas. No entanto, programar aplicações de alto desempenho em ambientes distribuídos introduz uma alta complexidade de desenvolvimento. Este trabalho apresenta uma interface de programação que facilita a construção de pipelines lineares tolerantes a falhas para aplicações de processamento de stream em C++. A solução utiliza MPI (Message Passing Interface) para comunicação e o protocolo ABS (Asynchronous Barrier Snapshotting) juntamente com um agente monitor para a etapa de recuperação. Os resultados experimentais indicam uma redução significativa no tempo estimado de desenvolvimento para o programador, com impacto médio de -0.98% até 6.73% na vazão das aplicações. Além disso, o processo de recuperação mitiga o impacto das falhas na vazão do programa.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
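The paper's interface is not reproduced above, so as context only, here is a minimal hand-written MPI sketch of the mechanism it abstracts: a source -> stage -> sink linear pipeline whose source injects ABS-style snapshot markers that each rank acts on and forwards downstream. The rank layout, tags, message format, and checkpoint action are all assumptions for illustration, not the paper's API.

```cpp
// Hand-written sketch (run with: mpirun -np 3 ./pipeline); not the paper's
// interface. Tags, message layout, and the checkpoint action are assumed.
#include <mpi.h>
#include <cstdio>

constexpr int TAG_DATA = 0, TAG_MARKER = 1;

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank = 0;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0) {  // source: emits items plus periodic snapshot markers
        for (int i = 1; i <= 8; ++i) {
            MPI_Send(&i, 1, MPI_INT, 1, TAG_DATA, MPI_COMM_WORLD);
            if (i % 4 == 0)  // ABS barrier marker every 4 items
                MPI_Send(&i, 1, MPI_INT, 1, TAG_MARKER, MPI_COMM_WORLD);
        }
        int stop = -1;  // end-of-stream sentinel
        MPI_Send(&stop, 1, MPI_INT, 1, TAG_DATA, MPI_COMM_WORLD);
    } else {  // rank 1 = stage, rank 2 = sink
        const int next = (rank == 1) ? 2 : -1;
        while (true) {
            int v = 0;
            MPI_Status st;
            MPI_Recv(&v, 1, MPI_INT, rank - 1, MPI_ANY_TAG, MPI_COMM_WORLD, &st);
            if (st.MPI_TAG == TAG_MARKER) {
                // aligned on a marker: persist local operator state here,
                // then forward the marker so downstream ranks align too
                std::printf("rank %d: checkpoint after item %d\n", rank, v);
                if (next > 0) MPI_Send(&v, 1, MPI_INT, next, TAG_MARKER, MPI_COMM_WORLD);
                continue;
            }
            if (v < 0) {  // propagate the sentinel and shut down
                if (next > 0) MPI_Send(&v, 1, MPI_INT, next, TAG_DATA, MPI_COMM_WORLD);
                break;
            }
            if (next > 0) {  // stage: apply its operator and forward
                v *= 2;
                MPI_Send(&v, 1, MPI_INT, next, TAG_DATA, MPI_COMM_WORLD);
            } else {  // sink: consume
                std::printf("sink received %d\n", v);
            }
        }
    }
    MPI_Finalize();
    return 0;
}
```

Because MPI preserves message order between a given sender and receiver, the markers stay aligned with the data stream, which is what lets each operator snapshot a consistent state.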
Ahmad, Sunna Imtiaz; Olczyk, Jakub; Araújo, Adriel S.; de Moura Medeiros, João Pedro; Teixeira, Vinicius C.; Gomes, Carlos F. A.; Magnaguagno, Maurício Cecílio; Roederer, Quinn; Dutra, Vinicius; Conley, R. Scott; Griebler, Dalvan; Eckert, George; Pinho, Márcio Sarroglia; Turkkahraman, Hakan: A Novel Multimodal Deep Image Analysis Model for Predicting Extraction/Non-Extraction Decision. Journal Article. In: Orthodontics & Craniofacial Research, vol. na, pp. na, 2025.
@article{AHMAD:OCR:25,
title = {A Novel Multimodal Deep Image Analysis Model for Predicting Extraction/Non-Extraction Decision},
author = {Sunna Imtiaz Ahmad and Jakub Olczyk and Adriel S. Araújo and João Pedro de Moura Medeiros and Vinicius C. Teixeira and Carlos F. A. Gomes and Maurício Cecílio Magnaguagno and Quinn Roederer and Vinicius Dutra and R. Scott Conley and Dalvan Griebler and George Eckert and Márcio Sarroglia Pinho and Hakan Turkkahraman},
url = {https://doi.org/10.1111/ocr.70057},
doi = {10.1111/ocr.70057},
year = {2025},
date = {2025-10-01},
urldate = {2025-10-01},
journal = {Orthodontics & Craniofacial Research},
volume = {na},
pages = {na},
publisher = {Wiley},
abstract = {This study aimed to develop a deep learning model classifier capable of predicting the extraction/non-extraction binary decision using lateral cephalometric radiographs (LCRs) and intraoral scans (IOS) to serve as an additional decision-support tool for orthodontists. Materials and Methods: The dataset was composed of LCRs and IOS from 617 patients (mean age: 18.2, 63.5% female) treated at the Indiana University School of Dentistry. Subjects were categorised into two groups: extraction (192) and non-extraction (425). Two sets of features were extracted from IOS: traditional arch measurements and novel tooth spatial features. For LCRs, features were derived using CephNet-based landmark detection (Land) and a convolutional autoencoder (AE), and the dimensionality was reduced using Principal Component Analysis (PCA). Models were evaluated using accuracy, sensitivity, specificity, positive predictive value (PPV or precision), negative predictive value (NPV), positive likelihood ratio (LR+), negative likelihood ratio (LR−), and F1 score. Results: The IOS + Land model achieved the highest overall accuracy (77%) and F1 score (0.62), with strong specificity (83%) and PPV (62%). In contrast, the Land model yielded the highest sensitivity (82%), but at the cost of lower specificity (57%). McNemar's test revealed that the AE model was significantly less accurate than IOS + AE (p = 0.048), IOS + Land (p = 0.006), and IOS + AE + Land (p = 0.005). Conclusion: Deep learning models can predict the extraction/non-extraction decision using IOS and LCRs with high accuracy and diagnostic performance. Multimodal approaches, particularly those integrating IOS with cephalometric landmarks, demonstrate superior accuracy, sensitivity, and specificity compared to single-modality models.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Araujo, Gabriell; Griebler, Dalvan; Fernandes, Luiz Gustavo: Performance, Portability, and Productivity of HIP on GPUs with NAS Parallel Benchmarks. Inproceedings. In: 2025 IEEE/SBC 37th International Symposium on Computer Architecture and High Performance Computing (SBAC-PAD), pp. 204-214, IEEE, Bonito, Brazil, 2025.
@inproceedings{ARAUJO:SBAC-PAD:25,
title = {Performance, Portability, and Productivity of HIP on GPUs with NAS Parallel Benchmarks},
author = {Gabriell Araujo and Dalvan Griebler and Luiz Gustavo Fernandes},
url = {https://doi.org/10.1109/SBAC-PAD66369.2025.00027},
doi = {10.1109/SBAC-PAD66369.2025.00027},
year = {2025},
date = {2025-10-01},
booktitle = {2025 IEEE/SBC 37th International Symposium on Computer Architecture and High Performance Computing (SBAC-PAD)},
pages = {204-214},
publisher = {IEEE},
address = {Bonito, Brazil},
series = {SBAC-PAD'25},
abstract = {Graphics Processing Units (GPUs) are powerful, massively parallel processors that have become ubiquitous in modern computing. In recent years, the GPU market has diversified, with vendors like AMD and Intel offering high-performance alternatives to NVIDIA. However, most applications are written using NVIDIA's CUDA API, which is incompatible with non-NVIDIA GPUs, creating significant challenges for developers who must port their code to different architectures. To address this issue, AMD developed the Heterogeneous-Compute Interface for Portability (HIP), an open-source API for cross-vendor GPU programming. However, HIP is relatively new, leaving gaps in the literature regarding its performance, portability, and productivity. In this paper, we evaluate HIP using the NAS Parallel Benchmarks (NPB), a CFD-based suite maintained by NASA. We present the first HIP-based implementation of NPB and conduct experiments on integrated and discrete GPUs from NVIDIA, AMD, and Intel. Our results provide novel insights into HIP’s performance and portability, particularly for integrated GPUs and Intel discrete GPUs, which have been underrepresented in prior studies. We also assess productivity using different metrics to quantify the programming effort of HIP-based implementations. This work addresses key gaps in the literature, offering valuable data and insights for developers targeting emerging GPU architectures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
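For readers new to HIP, a generic vector-addition example (not code from the paper's NPB port) shows the portability property being evaluated: the same kernel and host source compile unchanged for AMD GPUs with hipcc and for NVIDIA GPUs through HIP's CUDA back end.

```cpp
// Generic HIP vector addition; illustrative only, not from the NPB port.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

__global__ void vadd(const float* a, const float* b, float* c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) c[i] = a[i] + b[i];
}

int main() {
    const int n = 1 << 20;
    std::vector<float> a(n, 1.0f), b(n, 2.0f), c(n);
    float *da, *db, *dc;
    hipMalloc((void**)&da, n * sizeof(float));
    hipMalloc((void**)&db, n * sizeof(float));
    hipMalloc((void**)&dc, n * sizeof(float));
    hipMemcpy(da, a.data(), n * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(db, b.data(), n * sizeof(float), hipMemcpyHostToDevice);
    // launch geometry mirrors the usual CUDA configuration
    hipLaunchKernelGGL(vadd, dim3((n + 255) / 256), dim3(256), 0, 0, da, db, dc, n);
    hipMemcpy(c.data(), dc, n * sizeof(float), hipMemcpyDeviceToHost);
    std::printf("c[0] = %f\n", c[0]);  // expected: 3.0
    hipFree(da); hipFree(db); hipFree(dc);
    return 0;
}
```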
Faé, Leonardo; Griebler, Dalvan: Towards GPU Parallelism Abstractions in Rust: A Case Study with Linear Pipelines. Inproceedings. In: Anais do XXIX Simpósio Brasileiro de Linguagens de Programação, pp. 75-83, SBC, Recife/PE, 2025.
@inproceedings{FAE:SBLP:25,
title = {Towards GPU Parallelism Abstractions in Rust: A Case Study with Linear Pipelines},
author = {Leonardo Faé and Dalvan Griebler},
url = {https://sol.sbc.org.br/index.php/sblp/article/view/36951/36736},
doi = {10.5753/sblp.2025.13152},
year = {2025},
date = {2025-09-01},
booktitle = {Anais do XXIX Simpósio Brasileiro de Linguagens de Programação},
pages = {75-83},
publisher = {SBC},
address = {Recife/PE},
series = {SBLP'25},
abstract = {Programming Graphics Processing Units (GPUs) for general-purpose computation remains a daunting task, often requiring specialized knowledge of low-level APIs like CUDA or OpenCL. While Rust has emerged as a modern, safe, and performant systems programming language, its adoption in the GPU computing domain is still nascent. Existing approaches often involve intricate compiler modifications or complex static analysis to adapt CPU-centric Rust code for GPU execution. This paper presents a novel high-level abstraction in Rust, leveraging procedural macros to automatically generate GPU-executable code from constrained Rust functions. Our approach simplifies the code generation process by imposing specific limitations on how these functions can be written, thereby avoiding the need for complex static analysis. We demonstrate the feasibility and effectiveness of our abstraction through a case study involving linear pipeline parallel patterns, a common structure in data-parallel applications. By transforming Rust functions annotated as source, stage, or sink in a pipeline, we enable straightforward execution on the GPU. We evaluate our abstraction's performance and programmability using two benchmark applications: sobel (image filtering) and latbol (fluid simulation), comparing it against manual OpenCL implementations. Our results indicate that while incurring a small performance overhead in some cases, our approach significantly reduces development effort and, in certain scenarios, achieves comparable or even superior throughput compared to CPU-based parallelism.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Ahmad, Sunna I.; Araújo, Adriel S.; Teixeira, Vinicius C.; Gomes, Carlos F. A.; Dutra, Vinicius; Roederer, Quinn; Conley, R. Scott; Griebler, Dalvan; Pinho, Márcio S.; Turkkahraman, Hakan: A Novel AI-driven Automated Orthodontic Model Analysis to Improve Classification of Orthodontic Extraction Cases. Inproceedings. In: 2025 IEEE 49th Annual Computers, Software, and Applications Conference (COMPSAC), pp. 1853-1860, IEEE, Toronto, Canada, 2025.
@inproceedings{AHMAD:COMPSAC:25,
title = {A Novel AI-driven Automated Orthodontic Model Analysis to Improve Classification of Orthodontic Extraction Cases},
author = {Sunna I. Ahmad and Adriel S. Araújo and Vinicius C. Teixeira and Carlos F. A. Gomes and Vinicius Dutra and Quinn Roederer and R. Scott Conley and Dalvan Griebler and Márcio S. Pinho and Hakan Turkkahraman},
url = {https://doi.org/10.1109/COMPSAC65507.2025.00254},
doi = {10.1109/COMPSAC65507.2025.00254},
year = {2025},
date = {2025-07-01},
booktitle = {2025 IEEE 49th Annual Computers, Software, and Applications Conference (COMPSAC)},
pages = {1853-1860},
publisher = {IEEE},
address = {Toronto, Canada},
abstract = {Malocclusion, a prevalent dental condition worldwide, necessitates orthodontic intervention to correct tooth misalignment and improve oral health. Treatment can involve extraction of permanent teeth, depending on dental crowding, jaw relationships, and facial aesthetics. Today, clinical decision support systems have introduced machine learning (ML) to assist orthodontists in determining optimal treatment plans. This study explores the development of a novel, fully automated method for extracting dentoalveolar features from 3D intraoral scans (IOS), aiming to enhance orthodontic decision-making. Using deep learning-based IOS segmentation as a basis, dental measurements were developed and used to train supervised ML classifiers, including support vector machines (SVM), logistic regression, decision trees, and random forests. An ensemble of SVM models demonstrated the highest accuracy (73%) in predicting extraction decisions, with these novel domain-specific features proving more informative than traditional dental arch measurements. While further improvements are possible, not only in the automated segmentation but also by applying feature selection, the results highlight the potential of AI-driven analysis to streamline orthodontic workflows, reduce manual intervention, and improve clinical efficiency.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Guder, Larissa; Aires, João Paulo; Manssour, Isabel H.; Griebler, Dalvan: GoViz: A Visualization Tool for Empowering Transparency in Government Speech. Inproceedings. In: Annual International Conference on Digital Government Research, pp. 954, Digital Government Society, Porto Alegre, Brasil, 2025.
@inproceedings{GUDER:DGO:25,
title = {GoViz: A Visualization Tool for Empowering Transparency in Government Speech},
author = {Larissa Guder and João Paulo Aires and Isabel H Manssour and Dalvan Griebler},
url = {https://doi.org/10.59490/dgo.2025.954},
doi = {10.59490/dgo.2025.954},
year = {2025},
date = {2025-05-01},
booktitle = {Annual International Conference on Digital Government Research},
volume = {26},
pages = {954},
publisher = {Digital Government Society},
address = {Porto Alegre, Brasil},
abstract = {Public speech from government figures often describes relevant actions that can impact the population's lives. However, most people do not have the time or access to analyze and understand public speech. Such a scenario narrows people's participation in the main discussions, which leads to multiple misunderstandings. In this work, we propose GoViz, a tool that automatically produces visual representations to outline governmental speeches regarding the subject, its main actors, and how they connect to the discussion topics. GoViz processes natural language from speech transcriptions in a pipeline that identifies part-of-speech elements, named entities, and the relations between persons, making speech content more accessible and insightful. Using publicly available data, we evaluate our tool in two different languages (Portuguese and English). The results demonstrate that the visualizations from both datasets facilitate understanding the speech content. Thus, our main contribution is to encourage the participation of citizens in parliamentary issues, allowing a simplified and visually engaging avenue to access long speeches and fostering improved communication between parliamentarians and the population.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Czarnul, Paweł; Antal, Marcel; Baniata, Hamza; Griebler, Dalvan; Kertesz, Attila; Kessler, Christoph W.; Kouloumpris, Andreas; Kovačić, Salko; Markus, Andras; Michael, Maria K.; Nikolaou, Panagiota; Öz, Isil; Prodan, Radu; Rakić, Gordana: Optimization of resource-aware parallel and distributed computing: a review. Journal Article. In: The Journal of Supercomputing, vol. 81, no. 7, pp. 848, 2025.
@article{CZARNUL:Supercomputing:25,
title = {Optimization of resource-aware parallel and distributed computing: a review},
author = {Paweł Czarnul and Marcel Antal and Hamza Baniata and Dalvan Griebler and Attila Kertesz and Christoph W. Kessler and Andreas Kouloumpris and Salko Kovačić and Andras Markus and Maria K. Michael and Panagiota Nikolaou and Isil Öz and Radu Prodan and Gordana Rakić},
url = {https://doi.org/10.1007/s11227-025-07295-7},
doi = {10.1007/s11227-025-07295-7},
year = {2025},
date = {2025-05-01},
urldate = {2025-05-01},
journal = {The Journal of Supercomputing},
volume = {81},
number = {7},
pages = {848},
publisher = {Springer},
abstract = {This paper presents a review of state-of-the-art solutions concerning the optimization of computing in the field of parallel and distributed systems. Firstly, we contribute by identifying resources and quality metrics in this context including servers, network interconnects, storage systems, computational devices as well as execution time/performance, energy, security, and error vulnerability, respectively. We subsequently identify commonly used problem formulations and algorithms for integer linear programming, greedy algorithms, dynamic programming, genetic algorithms, particle swarm optimization, ant colony optimization, game theory, and reinforcement learning. Afterward, we characterize frequently considered optimization problems by stating these terms in domains such as data centers, cloud, fog, blockchain, high performance, and volunteer computing. Based on the extensive analysis, we identify how particular resources and corresponding quality metrics are considered in these domains and which problem formulations are used for which system types, either parallel or distributed environments. This allows us to formulate open research problems and challenges in this field and analyze research interest in problem formulations/domains in recent years.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Faé, Leonardo G.; Griebler, Dalvan: Novos benchmarks para aplicações de streaming em Rust. Inproceedings. In: Escola Regional de Alto Desempenho (ERAD-RS), pp. 145-146, Sociedade Brasileira de Computação (SBC), Foz do Iguaçu, PR, BR, 2025.
@inproceedings{FAE:ERAD:25,
title = {Novos benchmarks para aplicações de streaming em Rust},
author = {Leonardo G. Faé and Dalvan Griebler},
url = {https://doi.org/10.5753/eradrs.2025.6800},
doi = {10.5753/eradrs.2025.6800},
year = {2025},
date = {2025-04-01},
booktitle = {Escola Regional de Alto Desempenho (ERAD-RS)},
pages = {145-146},
publisher = {Sociedade Brasileira de Computação (SBC)},
address = {Foz do Iguaçu, PR, BR},
abstract = {Rust é uma nova linguagem de programação de baixo nível com foco em desempenho e segurança. RustStreamBench é um conjunto de benchmarks criados para medir a performance de Rust em aplicações de processamento de stream. Propomos duas aplicações novas: sobel e latbol, para adicionar a esse conjunto, com características de processamento diferentes.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Alf, Lucas M.; Griebler, Dalvan: Proposta de geração de código paralelo tolerante a falhas com SPar para aplicações de stream. Inproceedings. In: Escola Regional de Alto Desempenho (ERAD-RS), pp. 135-136, Sociedade Brasileira de Computação (SBC), Foz do Iguaçu, PR, BR, 2025.
@inproceedings{ALF:ERAD:25,
title = {Proposta de geração de código paralelo tolerante a falhas com SPar para aplicações de stream},
author = {Lucas M. Alf and Dalvan Griebler},
url = {https://doi.org/10.5753/eradrs.2025.6630},
doi = {10.5753/eradrs.2025.6630},
year = {2025},
date = {2025-04-01},
booktitle = {Escola Regional de Alto Desempenho (ERAD-RS)},
pages = {135-136},
publisher = {Sociedade Brasileira de Computação (SBC)},
address = {Foz do Iguaçu, PR, BR},
abstract = {Devido à necessidade de sistemas de processamento de stream serem executados por longos períodos de tempo, até mesmo de forma indefinida, faz-se necessária a presença de mecanismos de tolerância a falhas para lidar com possíveis imprevistos. Atualmente, a SPar não fornece tolerância a falhas nem garantias de entrega de mensagens. Motivado por esse fator, o principal objetivo desta pesquisa envolve a exploração de formas de introduzir aspectos de tolerância a falhas na geração de código paralelo com a SPar.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Hoffmann, Renato Barreto; Griebler, Dalvan: Proposta de Paralelismo Semi-Automático de Pipelines Lineares em C++. Inproceedings. In: Escola Regional de Alto Desempenho (ERAD-RS), pp. 149-150, Sociedade Brasileira de Computação (SBC), Foz do Iguaçu, PR, BR, 2025.
@inproceedings{HOFFMANN:ERAD:25,
title = {Proposta de Paralelismo Semi-Automático de Pipelines Lineares em C++},
author = {Renato Barreto Hoffmann and Dalvan Griebler},
url = {https://doi.org/10.5753/eradrs.2025.6803},
doi = {10.5753/eradrs.2025.6803},
year = {2025},
date = {2025-04-01},
booktitle = {Escola Regional de Alto Desempenho (ERAD-RS)},
pages = {149-150},
publisher = {Sociedade Brasileira de Computação (SBC)},
address = {Foz do Iguaçu, PR, BR},
abstract = {Atingir alto desempenho, portabilidade e produtividade continua sendo um desafio fundamental na comunidade de computação de alto desempenho. Entretanto, a paralelização automática de programas nunca foi amplamente adotada devido à sua inconsistência. Assim, a tarefa onerosa da programação paralela permanece sob a responsabilidade direta dos programadores. Este trabalho propõe-se a estudar técnicas de paralelismo com compiladores.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Fim, Gabriel Rustick; Griebler, Dalvan: Proposta de Paralelismo de Stream com suporte a Multi-GPU em Ambientes Distribuídos. Inproceedings. In: Escola Regional de Alto Desempenho (ERAD-RS), pp. 141-142, Sociedade Brasileira de Computação (SBC), Foz do Iguaçu, PR, BR, 2025.
@inproceedings{FIM:ERAD:25,
title = {Proposta de Paralelismo de Stream com suporte a Multi-GPU em Ambientes Distribuídos},
author = {Gabriel Rustick Fim and Dalvan Griebler},
url = {https://doi.org/10.5753/eradrs.2025.6786},
doi = {10.5753/eradrs.2025.6786},
year = {2025},
date = {2025-04-01},
booktitle = {Escola Regional de Alto Desempenho (ERAD-RS)},
pages = {141-142},
publisher = {Sociedade Brasileira de Computação (SBC)},
address = {Foz do Iguaçu, PR, BR},
abstract = {Considerando a necessidade de tempos de processamento mais rápidos, a utilização de ambientes multi-aceleradores vem se tornando cada vez mais proeminente na literatura. Infelizmente, programar para esses tipos de ambientes apresenta uma série de desafios que fazem com que o desenvolvimento de códigos direcionados a multi-GPUs exija um maior esforço de programação. Propomos investigar como utilizar anotações C++ para simplificar a geração de código multi-GPU visando execuções em ambientes distribuídos sem comprometer o desempenho.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Martins, Eduardo M.; Griebler, Dalvan: Proposta de Tolerância a Falhas Baseada em Log para Paralelismo de Stream. Inproceedings. In: Escola Regional de Alto Desempenho (ERAD-RS), pp. 129-130, Sociedade Brasileira de Computação (SBC), Foz do Iguaçu, PR, BR, 2025.
@inproceedings{MARTINS:ERAD:25,
title = {Proposta de Tolerância a Falhas Baseada em Log para Paralelismo de Stream},
author = {Eduardo M. Martins and Dalvan Griebler},
url = {https://doi.org/10.5753/eradrs.2025.6505},
doi = {10.5753/eradrs.2025.6505},
year = {2025},
date = {2025-04-01},
booktitle = {Escola Regional de Alto Desempenho (ERAD-RS)},
pages = {129-130},
publisher = {Sociedade Brasileira de Computação (SBC)},
address = {Foz do Iguaçu, PR, BR},
abstract = {Sistemas de processamento de stream precisam ser capazes de lidar com grandes volumes de dados por tempo indeterminado. A necessidade de análises em tempo real torna o paralelismo e os mecanismos de tolerância a falhas essenciais nesse contexto. Nesta pesquisa, propomos implementar suporte a um protocolo de tolerância a falhas baseado em log em uma biblioteca de processamento de stream de alto nível para C++.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Bianchessi, Lucas S.; Faé, Leonardo G.; Griebler, Dalvan: Estendendo o RustStreamBench com Renoir e novos benchmarks. Inproceedings. In: Escola Regional de Alto Desempenho (ERAD-RS), pp. 73-76, Sociedade Brasileira de Computação (SBC), Foz do Iguaçu, PR, BR, 2025.
@inproceedings{BIANCHESSI:ERAD:25,
title = {Estendendo o RustStreamBench com Renoir e novos benchmarks},
author = {Lucas S. Bianchessi and Leonardo G. Faé and Dalvan Griebler},
url = {https://doi.org/10.5753/eradrs.2025.6811},
doi = {10.5753/eradrs.2025.6811},
year = {2025},
date = {2025-04-01},
booktitle = {Escola Regional de Alto Desempenho (ERAD-RS)},
pages = {73-76},
publisher = {Sociedade Brasileira de Computação (SBC)},
address = {Foz do Iguaçu, PR, BR},
abstract = {Neste trabalho, foram adicionados ao RustStreamBench dois novos benchmarks e uma nova biblioteca de paralelismo, o Renoir. Apesar de o Renoir atingir um desempenho semelhante ao de bibliotecas de paralelismo mais consolidadas, a biblioteca não é capaz de paralelizar o benchmark image-processing por demandar memória demais, um erro semelhante ao da versão distribuída da biblioteca, que estourava a pilha independentemente do benchmark.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Araujo, Gabriell; Griebler, Dalvan: Proposta de uma suíte de benchmarks para avaliar o paralelismo de stream em GPUs. Inproceedings. In: Escola Regional de Alto Desempenho (ERAD-RS), pp. 131-132, Sociedade Brasileira de Computação (SBC), Foz do Iguaçu, PR, BR, 2025.
@inproceedings{ARAUJO:ERAD:25,
title = {Proposta de uma suíte de benchmarks para avaliar o paralelismo de stream em GPUs},
author = {Gabriell Araujo and Dalvan Griebler},
url = {https://doi.org/10.5753/eradrs.2025.6517},
doi = {10.5753/eradrs.2025.6517},
year = {2025},
date = {2025-04-01},
booktitle = {Escola Regional de Alto Desempenho (ERAD-RS)},
pages = {131-132},
publisher = {Sociedade Brasileira de Computação (SBC)},
address = {Foz do Iguaçu, PR, BR},
abstract = {GPUs são essenciais para permitir o processamento de dados em tempo real em aplicações de stream. Porém, a literatura apresenta diferentes limitações concernentes a este tópico. Este trabalho propõe o desenvolvimento de uma suíte de benchmarks de aplicações de stream aceleradas por GPUs, bem como uma investigação de técnicas de programação e desafios nesse domínio.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Rockenbach, Dinei A.; Araujo, Gabriell; Griebler, Dalvan; Fernandes, Luiz Gustavo: GSParLib: A multi-level programming interface unifying OpenCL and CUDA for expressing stream and data parallelism. Journal Article. In: Computer Standards & Interfaces, vol. 92, pp. 103922, 2025.
@article{ROCKENBACH:GSParLib:CSI:25,
title = {GSParLib: A multi-level programming interface unifying OpenCL and CUDA for expressing stream and data parallelism},
author = {Dinei A. Rockenbach and Gabriell Araujo and Dalvan Griebler and Luiz Gustavo Fernandes},
url = {https://doi.org/10.1016/j.csi.2024.103922},
doi = {10.1016/j.csi.2024.103922},
year = {2025},
date = {2025-03-01},
urldate = {2025-03-01},
journal = {Computer Standards & Interfaces},
volume = {92},
pages = {103922},
publisher = {Elsevier},
abstract = {The evolution of Graphics Processing Units (GPUs) has allowed the industry to overcome long-lasting problems and challenges. Many belong to the stream processing domain, whose central aspect is continuously receiving and processing data from streaming data producers such as cameras and sensors. Nonetheless, programming GPUs is challenging because it requires deep knowledge of many-core programming, mechanisms and optimizations for GPUs. Current GPU programming standards do not target stream processing and present programmability and code portability limitations. Among our main scientific contributions resides GSParLib, a C++ multi-level programming interface unifying CUDA and OpenCL for GPU processing on stream and data parallelism with negligible performance losses compared to manual implementations; GSParLib is organized in two layers: one for general-purpose computing and another for high-level structured programming based on parallel patterns; a methodology to provide unified and driver agnostic interfaces minimizing performance losses; a set of parallelism strategies and optimizations for GPU processing targeting stream and data parallelism; and new experiments covering GPU performance on applications exposing stream and data parallelism.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
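To make the programmability gap concrete, the sketch below is plain OpenCL C host code, not GSParLib's API: even a trivial map over one buffer requires platform, device, context, queue, program, and kernel setup by hand, which is the kind of per-driver boilerplate a unified, driver-agnostic layer such as GSParLib is designed to hide.

```cpp
// Raw OpenCL host boilerplate for a single map kernel (error checks and
// resource releases omitted for brevity); not GSParLib code.
#include <CL/cl.h>
#include <cstdio>
#include <vector>

static const char* src =
    "__kernel void twice(__global float* v) {"
    "  size_t i = get_global_id(0); v[i] = 2.0f * v[i]; }";

int main() {
    std::vector<float> h(1024, 1.0f);
    cl_platform_id plat; clGetPlatformIDs(1, &plat, nullptr);
    cl_device_id dev; clGetDeviceIDs(plat, CL_DEVICE_TYPE_DEFAULT, 1, &dev, nullptr);
    cl_context ctx = clCreateContext(nullptr, 1, &dev, nullptr, nullptr, nullptr);
    cl_command_queue q = clCreateCommandQueueWithProperties(ctx, dev, nullptr, nullptr);
    cl_program prog = clCreateProgramWithSource(ctx, 1, &src, nullptr, nullptr);
    clBuildProgram(prog, 1, &dev, nullptr, nullptr, nullptr);  // runtime compile
    cl_kernel k = clCreateKernel(prog, "twice", nullptr);
    cl_mem buf = clCreateBuffer(ctx, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                                sizeof(float) * h.size(), h.data(), nullptr);
    clSetKernelArg(k, 0, sizeof(cl_mem), &buf);
    size_t gsz = h.size();
    clEnqueueNDRangeKernel(q, k, 1, nullptr, &gsz, nullptr, 0, nullptr, nullptr);
    clEnqueueReadBuffer(q, buf, CL_TRUE, 0, sizeof(float) * h.size(), h.data(),
                        0, nullptr, nullptr);
    std::printf("h[0] = %f\n", h[0]);  // expected: 2.0
    return 0;
}
```

The equivalent CUDA host code looks entirely different, which is the portability problem a unified interface addresses.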
| Löff, Júnior; Hoffmann, Renato B.; Bianchessi, Arthur S.; Mallmann, Leonardo; Griebler, Dalvan; Binder, Walter NPB-PSTL: C++ STL Algorithms with Parallel Execution Policies in NAS Parallel Benchmarks Inproceedings doi In: 33rd Euromicro International Conference on Parallel, Distributed and Network-Based Processing (PDP), pp. 162-169, IEEE, Torino, Italy, 2025. @inproceedings{LOFF:PDP:25,
title = {NPB-PSTL: C++ STL Algorithms with Parallel Execution Policies in NAS Parallel Benchmarks},
author = {Júnior Löff and Renato B. Hoffmann and Arthur S. Bianchessi and Leonardo Mallmann and Dalvan Griebler and Walter Binder},
url = {https://doi.org/10.1109/PDP66500.2025.00030},
doi = {10.1109/PDP66500.2025.00030},
year = {2025},
date = {2025-03-01},
booktitle = {33rd Euromicro International Conference on Parallel, Distributed and Network-Based Processing (PDP)},
pages = {162-169},
publisher = {IEEE},
address = {Torino, Italy},
series = {PDP'25},
abstract = {The C++ language continually evolves through formal specifications established by its standards committee, proposing new features to maintain C++ as a relevant programming language while improving usability, performance, and portability across platforms. With the addition of parallel Standard Template Library (STL) algorithms in C++17, programmers can now leverage parallel processing capabilities via vendor-neutral parallel execution policies. This study presents an adaptation of the NAS Parallel Benchmarks (NPB), a well-established suite of applications for evaluating parallel architectures, by porting its sequential C-style code to use C++ STL abstractions and performance-portable parallelism features. Our goals are to (1) assess the suitability of the C++ STL for scientific applications like the ones in the NPB and (2) provide a comparative analysis of the performance and portability of the STL algorithms' parallel execution policies across different multicore architectures (x86 and AArch64). Results indicate that the performance of parallel STL algorithms is often close to that of optimized handwritten versions (OpenMP, Intel TBB, and FastFlow) on different architectures, albeit with notable shortfalls. Across all NPB benchmarks, the STL algorithms' geometric mean shows sequential execution times that are between 3.76% and 6.9% higher, while parallel executions may reach a geometric mean of up to 21.21% higher execution time.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
The C++ language continually evolves through formal specifications established by its standards committee, proposing new features to maintain C++ as a relevant programming language while improving usability, performance, and portability across platforms. With the addition of parallel Standard Template Library (STL) algorithms in C++17, programmers can now leverage parallel processing capabilities via vendor-neutral parallel execution policies. This study presents an adaptation of the NAS Parallel Benchmarks (NPB), a well-established suite of applications for evaluating parallel architectures, by porting its sequential C-style code to use C++ STL abstractions and performance-portable parallelism features. Our goals are to (1) assess the suitability of the C++ STL for scientific applications like the ones in the NPB and (2) provide a comparative analysis of the performance and portability of the STL algorithms' parallel execution policies across different multicore architectures (x86 and AArch64). Results indicate that the performance of parallel STL algorithms is often close to that of optimized handwritten versions (OpenMP, Intel TBB, and FastFlow) on different architectures, albeit with notable shortfalls. Across all NPB benchmarks, the STL algorithms' geometric mean shows sequential execution times that are between 3.76% and 6.9% higher, while parallel executions may reach a geometric mean of up to 21.21% higher execution time. |
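As a concrete illustration of the vendor-neutral execution policies evaluated above, the following minimal sketch (our own, not code from NPB-PSTL) shows how a single algorithm call switches between sequential and parallel execution:

#include <cstdio>
#include <execution>
#include <numeric>
#include <vector>

int main() {
    std::vector<double> x(1 << 20, 0.5), y(1 << 20, 2.0);
    // Dot product: std::execution::par_unseq permits parallelized and
    // vectorized execution; std::execution::seq would force sequential code.
    double dot = std::transform_reduce(std::execution::par_unseq,
                                       x.begin(), x.end(), y.begin(), 0.0);
    std::printf("dot = %f\n", dot);
}

With GCC's libstdc++, such code typically builds with -std=c++17 and links against TBB (-ltbb), which shows how a single policy argument maps onto an underlying threading backend.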
| Hoffmann, Renato B.; Faé, Leonardo G.; Griebler, Dalvan; Li, Xinliang David; Pereira, Fernando Magno Quintão Automatic Synthesis of Specialized Hash Functions Inproceedings doi In: Proceedings of the 23rd ACM/IEEE International Symposium on Code Generation and Optimization, pp. 317-330, ACM, Las Vegas, NV, USA, 2025. @inproceedings{HOFFMANN:sepe:cgo:25,
title = {Automatic Synthesis of Specialized Hash Functions},
author = {Renato B. Hoffmann and Leonardo G. Faé and Dalvan Griebler and Xinliang David Li and Fernando Magno Quintão Pereira},
url = {https://doi.org/10.1145/3696443.3708940},
doi = {10.1145/3696443.3708940},
year = {2025},
date = {2025-03-01},
booktitle = {Proceedings of the 23rd ACM/IEEE International Symposium on Code Generation and Optimization},
pages = {317-330},
publisher = {ACM},
address = {Las Vegas, NV, USA},
series = {CGO '25},
abstract = {This paper introduces a technique for synthesizing hash functions specialized to particular byte formats. This code generation method leverages three prevalent patterns: (i) fixed-length keys, (ii) keys with common subsequences, and (iii) keys ranging over predetermined sequences of bytes. Code generation involves two algorithms: one identifies relevant regular expressions within key examples, and the other generates specialized hash functions based on these expressions. Comparative analysis demonstrates that the synthetic functions outperform the general-purpose hashes in the C++ Standard Template Library and the Google Abseil Library when keys follow ascending, normal, or uniform distributions. In applications where low-mixing hashes are acceptable, the synthetic functions achieve speedups ranging from 2% to 11% on full benchmarks, and speedups of almost 50x when only hashing speed is considered.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
This paper introduces a technique for synthesizing hash functions specialized to particular byte formats. This code generation method leverages three prevalent patterns: (i) fixed-length keys, (ii) keys with common subsequences, and (iii) keys ranging over predetermined sequences of bytes. Code generation involves two algorithms: one identifies relevant regular expressions within key examples, and the other generates specialized hash functions based on these expressions. Comparative analysis demonstrates that the synthetic functions outperform the general-purpose hashes in the C++ Standard Template Library and the Google Abseil Library when keys follow ascending, normal, or uniform distributions. In applications where low-mixing hashes are acceptable, the synthetic functions achieve speedups ranging from 2% to 11% on full benchmarks, and speedups of almost 50x when only hashing speed is considered. |
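To give a flavor of pattern (i) above, the sketch below is a hand-written hash specialized for keys known to be exactly eight bytes long; it is our illustration of why fixed formats admit cheaper hashes, not the synthesizer's actual output:

#include <cstdint>
#include <cstring>
#include <string>

// Specialized hash assuming the byte format guarantees key.size() == 8:
// the whole key is read with a single load and mixed with one multiply,
// instead of looping over bytes as a general-purpose hash must.
uint64_t fixed8_hash(const std::string& key) {
    uint64_t v;
    std::memcpy(&v, key.data(), sizeof v);
    return v * 0x9E3779B97F4A7C15ull;  // multiplicative (Fibonacci-style) mixing
}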
| Mencagli, Gabriele; Rymarchuk, Yuriy; Griebler, Dalvan PPOIJ: Shared-Nothing Parallel Patterns for Efficient Online Interval Joins over Data Streams Inproceedings doi In: Proceedings of the 19th ACM International Conference on Distributed and Event-Based Systems, pp. 51-61, ACM, Gothenburg, Sweden, 2025. @inproceedings{MENCAGLI:DEBS:25,
title = {PPOIJ: Shared-Nothing Parallel Patterns for Efficient Online Interval Joins over Data Streams},
author = {Gabriele Mencagli and Yuriy Rymarchuk and Dalvan Griebler},
url = {https://doi.org/10.1145/3701717.3730542},
doi = {10.1145/3701717.3730542},
year = {2025},
date = {2025-01-01},
booktitle = {Proceedings of the 19th ACM International Conference on Distributed and Event-Based Systems},
pages = {51-61},
publisher = {ACM},
address = {Gothenburg, Sweden},
series = {DEBS'25},
abstract = {Joining data streams is a fundamental stateful operator in stream processing. It involves evaluating join pairs of tuples from two streams that meet specific user-defined criteria. This operator is typically time-consuming and often represents the major bottleneck in several real-world continuous queries. This paper focuses on a specific class of join operator, named online interval join, where we seek join pairs of tuples that occur within a certain time frame of each other. Our contribution is to propose different parallel patterns for implementing this join operator efficiently in the presence of watermarked data streams and skewed key distributions. The proposed patterns comply with the shared-nothing parallelization paradigm, a popular paradigm adopted by most of the existing Stream Processing Engines. Among the proposed patterns, we introduce one based on hybrid parallelism, which is particularly effective in handling various scenarios in terms of key distribution, number of keys, batching, and parallelism as demonstrated in our experimental analysis.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Joining data streams is a fundamental stateful operator in stream processing. It involves evaluating join pairs of tuples from two streams that meet specific user-defined criteria. This operator is typically time-consuming and often represents the major bottleneck in several real-world continuous queries. This paper focuses on a specific class of join operator, named online interval join, where we seek join pairs of tuples that occur within a certain time frame of each other. Our contribution is to propose different parallel patterns for implementing this join operator efficiently in the presence of watermarked data streams and skewed key distributions. The proposed patterns comply with the shared-nothing parallelization paradigm, a popular paradigm adopted by most of the existing Stream Processing Engines. Among the proposed patterns, we introduce one based on hybrid parallelism, which is particularly effective in handling various scenarios in terms of key distribution, number of keys, batching, and parallelism as demonstrated in our experimental analysis. |
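The matching condition of an online interval join can be stated compactly. The sketch below is a generic formulation with assumed field names and bounds, not PPOIJ's implementation:

#include <cstdint>

struct Tuple {
    uint64_t key;  // join key (skewed key distributions are the hard case)
    int64_t  ts;   // event timestamp
};

// Tuples a (from stream A) and b (from stream B) join if they share a key
// and b occurred within [a.ts - lower, a.ts + upper].
bool interval_match(const Tuple& a, const Tuple& b,
                    int64_t lower, int64_t upper) {
    return a.key == b.key && b.ts >= a.ts - lower && b.ts <= a.ts + upper;
}

Watermarks then determine when a tuple's interval can no longer receive matches, so its state can be safely purged.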
 | Araujo, Gabriell; Rockenbach, Dinei A.; Löff, Júnior; Griebler, Dalvan; Fernandes, Luiz G. A C++ annotation-based domain-specific language for expressing stream and data parallelism supporting CPU and GPU Journal Article doi In: Journal of Computer Languages, vol. 85, pp. 101369, 2025. @article{ARAUJO:COLA:25,
title = {A C++ annotation-based domain-specific language for expressing stream and data parallelism supporting CPU and GPU},
author = {Gabriell Araujo and Dinei A. Rockenbach and Júnior Löff and Dalvan Griebler and Luiz G. Fernandes},
url = {https://doi.org/10.1016/j.cola.2025.101369},
doi = {10.1016/j.cola.2025.101369},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {Journal of Computer Languages},
volume = {85},
pages = {101369},
publisher = {Elsevier},
abstract = {Graphics processing units (GPUs) and central processing units (CPUs) provide massive parallel computing in our modern computer systems (e.g., servers, desktops, smartphones, and laptops), and efficiently utilizing their processing power requires expertise in parallel programming. Domain-specific languages (DSLs) address this challenge by improving productivity and abstractions. SPar is a high-level DSL that promotes parallel programming abstractions for stream and data parallelism using C++ attribute annotations on serial code. Unlike existing solutions, SPar eliminates the need to manually implement low-level mechanisms to leverage stream and data parallelism on heterogeneous systems. In this article, we design an extended version of the language and a compiler algorithm for GPU code generation. We now offer a single parallel programming model targeting CPUs and GPUs to exploit stream and data parallelism. The experiments indicated performance improvements compared with previous versions of SPar and achieved performance comparable to handwritten code using lower-level programming abstractions in specific scenarios.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Graphics processing units (GPUs) and central processing units (CPUs) provide massive parallel computing in our modern computer systems (e.g., servers, desktops, smartphones, and laptops), and efficiently utilizing their processing power requires expertise in parallel programming. Domain-specific languages (DSLs) address this challenge by improving productivity and abstractions. SPar is a high-level DSL that promotes parallel programming abstractions for stream and data parallelism using C++ attribute annotations on serial code. Unlike existing solutions, SPar eliminates the need to manually implement low-level mechanisms to leverage stream and data parallelism on heterogeneous systems. In this article, we design an extended version of the language and a compiler algorithm for GPU code generation. We now offer a single parallel programming model targeting CPUs and GPUs to exploit stream and data parallelism. The experiments indicated performance improvements compared with previous versions of SPar and achieved performance comparable to handwritten code using lower-level programming abstractions in specific scenarios. |
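To illustrate SPar's annotation style, the following sketch uses the attribute names documented in the SPar literature (ToStream, Stage, Input, Output, Replicate); Item, has_input, read_item, process, and write_item are placeholders of ours, and the annotations only take effect when the code is compiled with the SPar compiler:

// Stream region: each loop iteration produces one stream item.
[[spar::ToStream]]
while (has_input()) {
    Item item = read_item();

    // Data-parallel stage replicated across four workers.
    [[spar::Stage, spar::Input(item), spar::Output(item), spar::Replicate(4)]]
    { item = process(item); }

    // Sink stage consuming the transformed item.
    [[spar::Stage, spar::Input(item)]]
    { write_item(item); }
}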
 | Leonarczyk, Ricardo; Mencagli, Gabriele; Griebler, Dalvan Self-Adaptive Micro-Batching for Low-Latency GPU-Accelerated Stream Processing Journal Article doi In: International Journal of Parallel Programming, vol. 53, no. 2, pp. 14, 2025. @article{LEONARCZYK:IJPP:25,
title = {Self-Adaptive Micro-Batching for Low-Latency GPU-Accelerated Stream Processing},
author = {Ricardo Leonarczyk and Gabriele Mencagli and Dalvan Griebler},
url = {https://doi.org/10.1007/s10766-025-00793-4},
doi = {10.1007/s10766-025-00793-4},
year = {2025},
date = {2025-01-01},
urldate = {2025-01-01},
journal = {International Journal of Parallel Programming},
volume = {53},
number = {2},
pages = {14},
publisher = {Springer},
abstract = {Stream processing is a computing paradigm enabling the continuous processing of unbounded data streams. Some classes of stream processing applications can greatly benefit from the parallel processing power and affordability offered by GPUs. However, efficient GPU utilization with stream processing applications often requires micro-batching techniques, i.e., the continuous processing of data batches to expose data parallelism opportunities and amortize host-device data transfer overheads. Micro-batching further introduces the challenge of finding suitable micro-batch sizes to maintain low-latency processing under highly dynamic workloads. The research field of self-adaptive software provides different techniques to address such a challenge. Our goal is to assess the performance of six self-adaptive algorithms in meeting latency requirements through micro-batch size adaptation. The algorithms are applied to a GPU-accelerated stream processing benchmark with a highly dynamic workload. Four of the six algorithms have already been evaluated using a smaller workload with the same application. We propose two new algorithms to address the shortcomings detected in the former four. The results demonstrate that a highly dynamic workload is challenging for the evaluated algorithms, as they could not meet the strictest latency requirements for more than 38.5% of the stream data items. Overall, all algorithms performed similarly in meeting the latency requirements. However, one of our proposed algorithms met the requirements for 4% more data items than the best of the previously studied algorithms, demonstrating greater effectiveness in highly variable workloads. This effectiveness is particularly evident in segments of the workload with abrupt transitions between low- and high-latency regions, where our proposed algorithms met the requirements for 79% of the data items in those segments, compared to 33% for the best of the earlier algorithms.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Stream processing is a computing paradigm enabling the continuous processing of unbounded data streams. Some classes of stream processing applications can greatly benefit from the parallel processing power and affordability offered by GPUs. However, efficient GPU utilization with stream processing applications often requires micro-batching techniques, i.e., the continuous processing of data batches to expose data parallelism opportunities and amortize host-device data transfer overheads. Micro-batching further introduces the challenge of finding suitable micro-batch sizes to maintain low-latency processing under highly dynamic workloads. The research field of self-adaptive software provides different techniques to address such a challenge. Our goal is to assess the performance of six self-adaptive algorithms in meeting latency requirements through micro-batch size adaptation. The algorithms are applied to a GPU-accelerated stream processing benchmark with a highly dynamic workload. Four of the six algorithms have already been evaluated using a smaller workload with the same application. We propose two new algorithms to address the shortcomings detected in the former four. The results demonstrate that a highly dynamic workload is challenging for the evaluated algorithms, as they could not meet the strictest latency requirements for more than 38.5% of the stream data items. Overall, all algorithms performed similarly in meeting the latency requirements. However, one of our proposed algorithms met the requirements for 4% more data items than the best of the previously studied algorithms, demonstrating greater effectiveness in highly variable workloads. This effectiveness is particularly evident in segments of the workload with abrupt transitions between low- and high-latency regions, where our proposed algorithms met the requirements for 79% of the data items in those segments, compared to 33% for the best of the earlier algorithms. |
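None of the six evaluated algorithms is reproduced here, but the feedback idea behind latency-driven batch sizing can be sketched as a naive controller of our own, with assumed thresholds and bounds:

#include <algorithm>
#include <cstddef>

// Shrink the micro-batch when measured latency exceeds the target; grow it
// when there is headroom, staying within [min_batch, max_batch]. A baseline
// intuition only, NOT one of the algorithms evaluated in the paper.
std::size_t adapt_batch_size(std::size_t current, double measured_ms,
                             double target_ms,
                             std::size_t min_batch = 1,
                             std::size_t max_batch = 4096) {
    double next = static_cast<double>(current);
    if (measured_ms > target_ms)
        next *= 0.5;   // over budget: halve to recover latency quickly
    else if (measured_ms < 0.8 * target_ms)
        next *= 1.25;  // headroom: grow to amortize host-device transfers
    return std::clamp(static_cast<std::size_t>(next), min_batch, max_batch);
}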
2024
|
 | Hoffmann, Renato B.; Griebler, Dalvan; Righi, Rodrigo Rosa; Fernandes, Luiz G. Benchmarking parallel programming for single-board computers Journal Article doi In: Future Generation Computer Systems, vol. 161, pp. 119-134, 2024. @article{HOFFMANN:single-board-computers:FGCS:24,
title = {Benchmarking parallel programming for single-board computers},
author = {Renato B. Hoffmann and Dalvan Griebler and Rodrigo Rosa Righi and Luiz G. Fernandes},
url = {https://doi.org/10.1016/j.future.2024.07.003},
doi = {10.1016/j.future.2024.07.003},
year = {2024},
date = {2024-12-01},
urldate = {2024-12-01},
journal = {Future Generation Computer Systems},
volume = {161},
pages = {119-134},
publisher = {Elsevier},
abstract = {Within the computing continuum, SBCs (single-board computers) are essential in the Edge and Fog, with many featuring multiple processing cores and GPU accelerators. Parallel computing thus plays a crucial role in unlocking the full computational potential of SBCs. However, selecting the best-suited solution in this context is inherently complex due to the intricate interplay between PPI (parallel programming interface) strategies, SBC architectural characteristics, and application characteristics and constraints. To our knowledge, no solution presents a combined discussion of these three aspects. To tackle this problem, this article aims to provide a benchmark of the best-suited PPIs given a set of hardware and application characteristics and requirements. Compared to existing benchmarks, we introduce new metrics, additional applications, various parallelism interfaces, and extra hardware devices. Therefore, our contributions are the methodology to benchmark parallelism on SBCs and the characterization of the best-performing PPIs and parallelism strategies for given situations. We are confident that parallel computing will become mainstream for edge and fog processing; thus, our solution provides the first insights into which kinds of applications and parallel programming interfaces are best suited to particular SBC hardware.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Within the computing continuum, SBCs (single-board computers) are essential in the Edge and Fog, with many featuring multiple processing cores and GPU accelerators. Parallel computing thus plays a crucial role in unlocking the full computational potential of SBCs. However, selecting the best-suited solution in this context is inherently complex due to the intricate interplay between PPI (parallel programming interface) strategies, SBC architectural characteristics, and application characteristics and constraints. To our knowledge, no solution presents a combined discussion of these three aspects. To tackle this problem, this article aims to provide a benchmark of the best-suited PPIs given a set of hardware and application characteristics and requirements. Compared to existing benchmarks, we introduce new metrics, additional applications, various parallelism interfaces, and extra hardware devices. Therefore, our contributions are the methodology to benchmark parallelism on SBCs and the characterization of the best-performing PPIs and parallelism strategies for given situations. We are confident that parallel computing will become mainstream for edge and fog processing; thus, our solution provides the first insights into which kinds of applications and parallel programming interfaces are best suited to particular SBC hardware. |
| Guder, Larissa; Aires, João Paulo; Griebler, Dalvan Dimensional Speech Emotion Recognition: a Bimodal Approach Inproceedings doi In: Anais Estendidos do XXX Simpósio Brasileiro de Sistemas Multimídia e Web, pp. 5-6, SBC, Juiz de Fora, Brasil, 2024. @inproceedings{GUDER:WEBMEDIA:24,
title = {Dimensional Speech Emotion Recognition: a Bimodal Approach},
author = {Larissa Guder and João Paulo Aires and Dalvan Griebler},
url = {https://doi.org/10.5753/webmedia_estendido.2024.244402},
doi = {10.5753/webmedia_estendido.2024.244402},
year = {2024},
date = {2024-10-01},
booktitle = {Anais Estendidos do XXX Simpósio Brasileiro de Sistemas Multimídia e Web},
pages = {5-6},
publisher = {SBC},
address = {Juiz de Fora, Brasil},
abstract = {Considering the human-machine relationship, affective computing aims to allow computers to recognize or express emotions. Speech Emotion Recognition is a task from affective computing that aims to recognize emotions in an audio utterance. The most common way to predict emotions from speech is to use predetermined classes in offline mode, which restricts emotion recognition to that fixed set of classes. To avoid this restriction, dimensional emotion recognition uses dimensions such as valence, arousal, and dominance, which can represent emotions with higher granularity. Existing approaches propose using textual information to improve results for the valence dimension. Although recent efforts have tried to improve results on speech emotion recognition to predict emotion dimensions, they do not consider real-world scenarios, where processing the input in a short time is necessary. Considering these aspects, this work provides the first step towards creating a bimodal approach for Dimensional Speech Emotion Recognition in streaming. Our approach combines sentence and audio representations as input to a recurrent neural network that performs speech emotion recognition. We evaluate different methods for creating audio and text representations, as well as automatic speech recognition techniques. Our best results achieve a CCC of 0.5915 for arousal, 0.4165 for valence, and 0.5899 for dominance on the IEMOCAP dataset.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Considering the human-machine relationship, affective computing aims to allow computers to recognize or express emotions. Speech Emotion Recognition is a task from affective computing that aims to recognize emotions in an audio utterance. The most common way to predict emotions from speech is to use predetermined classes in offline mode, which restricts emotion recognition to that fixed set of classes. To avoid this restriction, dimensional emotion recognition uses dimensions such as valence, arousal, and dominance, which can represent emotions with higher granularity. Existing approaches propose using textual information to improve results for the valence dimension. Although recent efforts have tried to improve results on speech emotion recognition to predict emotion dimensions, they do not consider real-world scenarios, where processing the input in a short time is necessary. Considering these aspects, this work provides the first step towards creating a bimodal approach for Dimensional Speech Emotion Recognition in streaming. Our approach combines sentence and audio representations as input to a recurrent neural network that performs speech emotion recognition. We evaluate different methods for creating audio and text representations, as well as automatic speech recognition techniques. Our best results achieve a CCC of 0.5915 for arousal, 0.4165 for valence, and 0.5899 for dominance on the IEMOCAP dataset. |
 | Vogel, Adriano; Danelutto, Marco; Torquati, Massimo; Griebler, Dalvan; Fernandes, Luiz Gustavo Enhancing self-adaptation for efficient decision-making at run-time in streaming applications on multicores Journal Article doi In: The Journal of Supercomputing, vol. 80, no. 15, pp. 22213-22244, 2024. @article{VOGEL:Supercomputing:24,
title = {Enhancing self-adaptation for efficient decision-making at run-time in streaming applications on multicores},
author = {Adriano Vogel and Marco Danelutto and Massimo Torquati and Dalvan Griebler and Luiz Gustavo Fernandes},
url = {https://doi.org/10.1007/s11227-024-06191-w},
doi = {10.1007/s11227-024-06191-w},
year = {2024},
date = {2024-10-01},
urldate = {2024-10-01},
journal = {The Journal of Supercomputing},
volume = {80},
number = {15},
pages = {22213-22244},
publisher = {Springer},
abstract = {Parallel computing is very important for accelerating the performance of computing applications. Moreover, parallel applications are expected to continue executing in more dynamic environments and react to changing conditions. In this context, applying self-adaptation is a potential solution to achieve a higher level of autonomic abstractions and runtime responsiveness. In our research, we aim to explore and assess the possible abstractions attainable through the transparent management of parallel executions by self-adaptation. Our primary objectives are to expand the adaptation space to better reflect real-world applications and assess the potential for self-adaptation to enhance efficiency. We provide the following scientific contributions: (I) A conceptual framework to improve the design of self-adaptation; (II) A new decision-making strategy for applications with multiple parallel stages; (III) A comprehensive evaluation of the proposed decision-making strategy compared to the state-of-the-art. The results demonstrate that the proposed conceptual framework can help design and implement self-adaptive strategies that are more modular and reusable. The proposed decision-making strategy provides significant gains in accuracy compared to the state-of-the-art, increasing the parallel applications' performance and efficiency.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Parallel computing is very important for accelerating the performance of computing applications. Moreover, parallel applications are expected to continue executing in more dynamic environments and react to changing conditions. In this context, applying self-adaptation is a potential solution to achieve a higher level of autonomic abstractions and runtime responsiveness. In our research, we aim to explore and assess the possible abstractions attainable through the transparent management of parallel executions by self-adaptation. Our primary objectives are to expand the adaptation space to better reflect real-world applications and assess the potential for self-adaptation to enhance efficiency. We provide the following scientific contributions: (I) A conceptual framework to improve the design of self-adaptation; (II) A new decision-making strategy for applications with multiple parallel stages; (III) A comprehensive evaluation of the proposed decision-making strategy compared to the state-of-the-art. The results demonstrate that the proposed conceptual framework can help design and implement self-adaptive strategies that are more modular and reusable. The proposed decision-making strategy provides significant gains in accuracy compared to the state-of-the-art, increasing the parallel applications' performance and efficiency. |
| Faé, Leonardo; Griebler, Dalvan An internal domain-specific language for expressing linear pipelines: a proof-of-concept with MPI in Rust Inproceedings doi In: Anais do XXVIII Simpósio Brasileiro de Linguagens de Programação, pp. 81-90, SBC, Curitiba/PR, 2024. @inproceedings{FAE:SBLP:24,
title = {An internal domain-specific language for expressing linear pipelines: a proof-of-concept with MPI in Rust},
author = {Leonardo Faé and Dalvan Griebler},
url = {https://doi.org/10.5753/sblp.2024.3691},
doi = {10.5753/sblp.2024.3691},
year = {2024},
date = {2024-09-01},
booktitle = {Anais do XXVIII Simpósio Brasileiro de Linguagens de Programação},
pages = {81-90},
publisher = {SBC},
address = {Curitiba/PR},
series = {SBLP'24},
abstract = {Parallel computation is necessary in order to process massive volumes of data in a timely manner. There are many parallel programming interfaces and environments, each with their own idiosyncrasies. This, alongside non-deterministic errors, makes parallel programs notoriously challenging to write. Great effort has been put forth to make parallel programming for several environments easier. In this work, we propose a DSL for Rust, using the language's source-to-source transformation facilities, that allows for automatic code generation for distributed environments that support the Message Passing Interface (MPI). Our DSL simplifies MPI's quirks, allowing the programmer to focus almost exclusively on the computation at hand. Performance experiments show little or no runtime difference between our abstraction and manually written MPI code while resulting in less than half the lines of code. More elaborate code complexity metrics (Halstead) estimate from 4.5 to 14.7 times lower effort for expressing parallelism.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Parallel computation is necessary in order to process massive volumes of data in a timely manner. There are many parallel programming interfaces and environments, each with their own idiosyncrasies. This, alongside non-deterministic errors, makes parallel programs notoriously challenging to write. Great effort has been put forth to make parallel programming for several environments easier. In this work, we propose a DSL for Rust, using the language's source-to-source transformation facilities, that allows for automatic code generation for distributed environments that support the Message Passing Interface (MPI). Our DSL simplifies MPI's quirks, allowing the programmer to focus almost exclusively on the computation at hand. Performance experiments show little or no runtime difference between our abstraction and manually written MPI code while resulting in less than half the lines of code. More elaborate code complexity metrics (Halstead) estimate from 4.5 to 14.7 times lower effort for expressing parallelism. |
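To make concrete the kind of boilerplate such a DSL hides, here is a minimal hand-written two-stage linear pipeline against the MPI C API (sketched in C++ for illustration; this is not the code the Rust DSL generates):

#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    const int n_items = 10, tag = 0;
    if (rank == 0) {                        // stage 1: producer
        for (int i = 0; i < n_items; ++i)
            MPI_Send(&i, 1, MPI_INT, 1, tag, MPI_COMM_WORLD);
    } else if (rank == 1) {                 // stage 2: consumer
        for (int i = 0; i < n_items; ++i) {
            int item;
            MPI_Recv(&item, 1, MPI_INT, 0, tag, MPI_COMM_WORLD,
                     MPI_STATUS_IGNORE);
            std::printf("processed %d\n", item * item);
        }
    }
    MPI_Finalize();
}

Matching message types, ranks, and tags by hand across every stage is exactly the error-prone detail the abstraction removes.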
| Löff, Júnior; Griebler, Dalvan; Fernandes, Luiz Gustavo; Binder, Walter MPR: An MPI Framework for Distributed Self-adaptive Stream Processing Inproceedings doi In: Euro-Par 2024: Parallel Processing, pp. 400-414, Springer, Madrid, Spain, 2024. @inproceedings{LOFF:Euro-Par:24,
title = {MPR: An MPI Framework for Distributed Self-adaptive Stream Processing},
author = {Júnior Löff and Dalvan Griebler and Luiz Gustavo Fernandes and Walter Binder},
url = {https://doi.org/10.1007/978-3-031-69583-4_28},
doi = {10.1007/978-3-031-69583-4_28},
year = {2024},
date = {2024-08-01},
booktitle = {Euro-Par 2024: Parallel Processing},
pages = {400-414},
publisher = {Springer},
address = {Madrid, Spain},
series = {Euro-Par'24},
abstract = {Stream processing systems must often cope with workloads varying in content, format, size, and input rate. The high variability and unpredictability make statically fine-tuning them very challenging. Our work addresses this limitation by providing a new framework and runtime system to simplify implementing and assessing new self-adaptive algorithms and optimizations. We implement a prototype on top of MPI called MPR and show its functionality. We focus on horizontal scaling by supporting the addition and removal of processes during execution time. Experiments reveal that MPR can achieve performance similar to that of a handwritten static MPI application. We also assess MPR's adaptation capabilities, showing that it can readily re-configure itself, with the help of a self-adaptive algorithm, in response to workload variations.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Stream processing systems must often cope with workloads varying in content, format, size, and input rate. The high variability and unpredictability make statically fine-tuning them very challenging. Our work addresses this limitation by providing a new framework and runtime system to simplify implementing and assessing new self-adaptive algorithms and optimizations. We implement a prototype on top of MPI called MPR and show its functionality. We focus on horizontal scaling by supporting the addition and removal of processes during execution time. Experiments reveal that MPR can achieve performance similar to that of a handwritten static MPI application. We also assess MPR's adaptation capabilities, showing that it can readily re-configure itself, with the help of a self-adaptive algorithm, in response to workload variations. |
| Gomes, Carlos Falcao Azevedo; Araujo, Adriel Silva; Ahmad, Sunna Imtiaz; Magnaguagno, Mauricio Cecilio; Teixeira, Vinicius Crisosthemos; Rajapuri, Anushri Singh; Roederer, Quinn; Griebler, Dalvan; Dutra, Vinicius; Turkkahraman, Hakan; Pinho, Marcio Sarroglia Multiview Machine Learning Classification of Tooth Extraction in Orthodontics Using Intraoral Scans Inproceedings doi In: 2024 IEEE 48th Annual Computers, Software, and Applications Conference (COMPSAC), pp. 1977-1982, IEEE, Osaka, Japan, 2024. @inproceedings{GOMES:COMPSAC:24,
title = {Multiview Machine Learning Classification of Tooth Extraction in Orthodontics Using Intraoral Scans},
author = {Carlos Falcao Azevedo Gomes and Adriel Silva Araujo and Sunna Imtiaz Ahmad and Mauricio Cecilio Magnaguagno and Vinicius Crisosthemos Teixeira and Anushri Singh Rajapuri and Quinn Roederer and Dalvan Griebler and Vinicius Dutra and Hakan Turkkahraman and Marcio Sarroglia Pinho},
url = {https://doi.org/10.1109/COMPSAC61105.2024.00316},
doi = {10.1109/COMPSAC61105.2024.00316},
year = {2024},
date = {2024-07-01},
booktitle = {2024 IEEE 48th Annual Computers, Software, and Applications Conference (COMPSAC)},
pages = {1977-1982},
publisher = {IEEE},
address = {Osaka, Japan},
abstract = {Orthodontic treatment planning often involves deciding whether to extract teeth, a critical and irreversible decision. Integrating machine learning (ML) can enhance decision-making. This study proposes using Intraoral Scans (IOS) 3D models to predict extraction/non-extraction binary decisions with ML models. We leverage a multiview approach, using images taken from multiple points of view of the 3D model. The methodology involved a dataset composed of preprocessed IOS from 181 subjects and an experimental procedure that evaluated multiple ML models in their ability to classify subjects using either grayscale pixel intensities or radiomic features. The results indicated that a logistic model applied to the radiomic features from the back and frontal views of the 3D models was one of the best model candidates, achieving a test accuracy of 70% and F1 scores of 0.73 and 0.65 for non-extraction and extraction cases, respectively. Overall, these findings indicate that a multiview approach to IOS 3D models can be used to predict extraction/non-extraction decisions. In addition, the results suggest that radiomic features provide useful information in the analysis of IOS data.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Orthodontic treatment planning often involves deciding whether to extract teeth, a critical and irreversible decision. Integrating machine learning (ML) can enhance decision-making. This study proposes using Intraoral Scans (IOS) 3D models to predict extraction/non-extraction binary decisions with ML models. We leverage a multiview approach, using images taken from multiple points of view of the 3D model. The methodology involved a dataset composed of preprocessed IOS from 181 subjects and an experimental procedure that evaluated multiple ML models in their ability to classify subjects using either grayscale pixel intensities or radiomic features. The results indicated that a logistic model applied to the radiomic features from the back and frontal views of the 3D models was one of the best model candidates, achieving a test accuracy of 70% and F1 scores of 0.73 and 0.65 for non-extraction and extraction cases, respectively. Overall, these findings indicate that a multiview approach to IOS 3D models can be used to predict extraction/non-extraction decisions. In addition, the results suggest that radiomic features provide useful information in the analysis of IOS data. |
| Guder, Larissa; Aires, João Paulo; Meneguzzi, Felipe; Griebler, Dalvan Dimensional Speech Emotion Recognition from Bimodal Features Inproceedings doi In: Anais do XXIV Simpósio Brasileiro de Computação Aplicada à Saúde, pp. 579-590, SBC, Goiânia, Brasil, 2024. @inproceedings{GUDER:SBCAS:24,
title = {Dimensional Speech Emotion Recognition from Bimodal Features},
author = {Larissa Guder and João Paulo Aires and Felipe Meneguzzi and Dalvan Griebler},
url = {https://doi.org/10.5753/sbcas.2024.2779},
doi = {10.5753/sbcas.2024.2779},
year = {2024},
date = {2024-07-01},
booktitle = {Anais do XXIV Simpósio Brasileiro de Computação Aplicada à Saúde},
pages = {579-590},
publisher = {SBC},
address = {Goiânia, Brasil},
abstract = {Considering the human-machine relationship, affective computing aims to allow computers to recognize or express emotions. Speech Emotion Recognition is a task from affective computing that aims to recognize emotions in an audio utterance. The most common way to predict emotions from speech is to use predetermined classes in offline mode, which restricts emotion recognition to that fixed set of classes. To avoid this restriction, dimensional emotion recognition uses dimensions such as valence, arousal, and dominance to represent emotions with higher granularity. Existing approaches propose using textual information to improve results for the valence dimension. Although recent efforts have tried to improve results on speech emotion recognition to predict emotion dimensions, they do not consider real-world scenarios where processing the input quickly is necessary. Considering these aspects, we take the first step towards creating a bimodal approach for dimensional speech emotion recognition in streaming. Our approach combines sentence and audio representations as input to a recurrent neural network that performs speech emotion recognition. Our final architecture achieves a Concordance Correlation Coefficient of 0.5915 for arousal, 0.1431 for valence, and 0.5899 for dominance on the IEMOCAP dataset.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Considering the human-machine relationship, affective computing aims to allow computers to recognize or express emotions. Speech Emotion Recognition is a task from affective computing that aims to recognize emotions in an audio utterance. The most common way to predict emotions from speech is to use predetermined classes in offline mode, which restricts emotion recognition to that fixed set of classes. To avoid this restriction, dimensional emotion recognition uses dimensions such as valence, arousal, and dominance to represent emotions with higher granularity. Existing approaches propose using textual information to improve results for the valence dimension. Although recent efforts have tried to improve results on speech emotion recognition to predict emotion dimensions, they do not consider real-world scenarios where processing the input quickly is necessary. Considering these aspects, we take the first step towards creating a bimodal approach for dimensional speech emotion recognition in streaming. Our approach combines sentence and audio representations as input to a recurrent neural network that performs speech emotion recognition. Our final architecture achieves a Concordance Correlation Coefficient of 0.5915 for arousal, 0.1431 for valence, and 0.5899 for dominance on the IEMOCAP dataset. |
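For reference, the Concordance Correlation Coefficient reported above is Lin's standard definition, where \rho is the Pearson correlation between predictions x and labels y, and \mu and \sigma^2 denote their means and variances:

\mathrm{CCC} = \frac{2\,\rho\,\sigma_x \sigma_y}{\sigma_x^2 + \sigma_y^2 + (\mu_x - \mu_y)^2}

Unlike plain correlation, CCC also penalizes differences in scale and location, which is why it is the usual metric for dimensional emotion prediction.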
| Bianchessi, Lucas S.; Faé, Leonardo G.; Hoffmann, Renato B.; Griebler, Dalvan Analisando Paralelismo de Dados em Rust Usando o Método do Gradiente Conjugado Inproceedings doi In: Anais da XXIV Escola Regional de Alto Desempenho da Região Sul, pp. 9-12, Sociedade Brasileira de Computação, Florianópolis, Brazil, 2024. @inproceedings{BIANCHESSI:ERAD:24,
title = {Analisando Paralelismo de Dados em Rust Usando o Método do Gradiente Conjugado},
author = {Lucas S. Bianchessi and Leonardo G. Faé and Renato B. Hoffmann and Dalvan Griebler},
url = {https://doi.org/10.5753/eradrs.2024.238677},
doi = {10.5753/eradrs.2024.238677},
year = {2024},
date = {2024-04-01},
booktitle = {Anais da XXIV Escola Regional de Alto Desempenho da Região Sul},
pages = {9-12},
publisher = {Sociedade Brasileira de Computação},
address = {Florianópolis, Brazil},
abstract = {In the high-performance computing landscape, the Rust language has become increasingly popular, promising safety, performance, and a modern development environment. To analyze the viability and efficiency of Rust, the conjugate gradient method from the NPB benchmarks was used. The results showed parallel performance comparable to C++, and a performance loss in the sequential version.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
In the high-performance computing landscape, the Rust language has become increasingly popular, promising safety, performance, and a modern development environment. To analyze the viability and efficiency of Rust, the conjugate gradient method from the NPB benchmarks was used. The results showed parallel performance comparable to C++, and a performance loss in the sequential version. |
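For context, the NPB CG kernel ported in this work is built around the standard conjugate gradient recurrences for solving Ax = b with a symmetric positive-definite matrix A, starting from r_0 = b - Ax_0 and p_0 = r_0:

\alpha_k = \frac{r_k^\top r_k}{p_k^\top A p_k}, \quad
x_{k+1} = x_k + \alpha_k p_k, \quad
r_{k+1} = r_k - \alpha_k A p_k, \quad
\beta_k = \frac{r_{k+1}^\top r_{k+1}}{r_k^\top r_k}, \quad
p_{k+1} = r_{k+1} + \beta_k p_k

The dominant cost is the sparse matrix-vector product A p_k, the natural target for the data parallelism compared across the Rust and C++ versions.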
| Hoffmann, Renato B. Em Direção à Programação Distribuída na Seleção de Planos em Sistemas Multi-Agentes Inproceedings doi In: Anais da XXIV Escola Regional de Alto Desempenho da Região Sul, pp. 121-122, Sociedade Brasileira de Computação, Florianópolis, Brazil, 2024. @inproceedings{HOFFMANN:ERAD:24,
title = {Em Direção à Programação Distribuída na Seleção de Planos em Sistemas Multi-Agentes},
author = {Renato B. Hoffmann},
url = {https://doi.org/10.5753/eradrs.2024.238734},
doi = {10.5753/eradrs.2024.238734},
year = {2024},
date = {2024-04-01},
booktitle = {Anais da XXIV Escola Regional de Alto Desempenho da Região Sul},
pages = {121-122},
publisher = {Sociedade Brasileira de Computação},
address = {Florianópolis, Brazil},
abstract = {Multi-agent systems are composed of multiple autonomous agents that interact with an environment and with each other to achieve specific goals. Jason, a popular multi-agent language, models these systems through beliefs, goals, and plans; plan selection is currently performed through a linear scan. This research therefore proposes to investigate plan selection performed in parallel and in a distributed system.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Multi-agent systems are composed of multiple autonomous agents that interact with an environment and with each other to achieve specific goals. Jason, a popular multi-agent language, models these systems through beliefs, goals, and plans; plan selection is currently performed through a linear scan. This research therefore proposes to investigate plan selection performed in parallel and in a distributed system. |
| Alf, Lucas M.; Griebler, Dalvan Tolerância a Falhas para Paralelismo de Stream de Alto Nível Inproceedings doi In: Anais da XXIV Escola Regional de Alto Desempenho da Região Sul, pp. 119-120, Sociedade Brasileira de Computação, Florianópolis, Brazil, 2024. @inproceedings{ALF:ERAD:24,
title = {Tolerância a Falhas para Paralelismo de Stream de Alto Nível },
author = {Lucas M. Alf and Dalvan Griebler},
url = {https://doi.org/10.5753/eradrs.2024.238679},
doi = {10.5753/eradrs.2024.238679},
year = {2024},
date = {2024-04-01},
booktitle = {Anais da XXIV Escola Regional de Alto Desempenho da Região Sul},
pages = {119-120},
publisher = {Sociedade Brasileira de Computação},
address = {Florianópolis, Brazil},
abstract = {Given that stream processing systems need to run for long periods of time, possibly indefinitely, reprocessing all data in the event of a failure can be highly costly or even unfeasible. In this research, we propose to investigate how to provide fault-tolerance mechanisms and consistency guarantees for high-level distributed stream parallelism.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Given that stream processing systems need to run for long periods of time, possibly indefinitely, reprocessing all data in the event of a failure can be highly costly or even unfeasible. In this research, we propose to investigate how to provide fault-tolerance mechanisms and consistency guarantees for high-level distributed stream parallelism. |
| Faé, Leonardo G.; Griebler, Dalvan Proposta de Pipelines Lineares de Alto Nível em Rust Utilizando GPU Inproceedings doi In: Anais da XXIV Escola Regional de Alto Desempenho da Região Sul, pp. 105-106, Sociedade Brasileira de Computação, Florianópolis, Brazil, 2024. @inproceedings{FAE:ERAD:24,
title = {Proposta de Pipelines Lineares de Alto Nível em Rust Utilizando GPU},
author = {Leonardo G. Faé and Dalvan Griebler},
url = {https://doi.org/10.5753/eradrs.2024.238565},
doi = {10.5753/eradrs.2024.238565},
year = {2024},
date = {2024-04-01},
booktitle = {Anais da XXIV Escola Regional de Alto Desempenho da Região Sul},
pages = {105-106},
publisher = {Sociedade Brasileira de Computação},
address = {Florianópolis, Brazil},
abstract = {Graphics Processing Units (GPUs) are hardware units designed to process massive amounts of data in parallel. Rust is a new low-level programming language focused on performance and safety. To date, there is little academic work on high-level abstractions for GPUs in Rust. We propose a possible abstraction, based on the pipeline pattern and implemented using procedural macros.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Graphics Processing Units (GPUs) are hardware units designed to process massive amounts of data in parallel. Rust is a new low-level programming language focused on performance and safety. To date, there is little academic work on high-level abstractions for GPUs in Rust. We propose a possible abstraction, based on the pipeline pattern and implemented using procedural macros. |
| Araujo, Gabriell; Griebler, Dalvan; Fernandes, Luiz Gustavo Em direção a um modelo de programação paralela único para CPUs e GPUs em processamento de stream Inproceedings doi In: Anais da XXIV Escola Regional de Alto Desempenho da Região Sul, pp. 103-104, Sociedade Brasileira de Computação, Florianópolis, Brazil, 2024. @inproceedings{ARAUJO:ERAD:24,
title = {Em direção a um modelo de programação paralela único para CPUs e GPUs em processamento de stream },
author = {Gabriell Araujo and Dalvan Griebler and Luiz Gustavo Fernandes},
url = {https://doi.org/10.5753/eradrs.2024.238670},
doi = {10.5753/eradrs.2024.238670},
year = {2024},
date = {2024-04-01},
booktitle = {Anais da XXIV Escola Regional de Alto Desempenho da Região Sul},
pages = {103-104},
publisher = {Sociedade Brasileira de Computação},
address = {Florianópolis, Brazil},
abstract = {This work presents partial results of ongoing research that uses the SPar Domain-Specific Language (DSL) to prototype a unified parallel programming model targeting CPUs and GPUs in stream processing. With the initial prototype, it is already possible to generate parallel code for CPUs and GPUs in stream processing.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
This work presents partial results of ongoing research that uses the SPar Domain-Specific Language (DSL) to prototype a unified parallel programming model targeting CPUs and GPUs in stream processing. With the initial prototype, it is already possible to generate parallel code for CPUs and GPUs in stream processing. |
| Fim, Gabriel Rustick; Griebler, Dalvan Proposta de Paralelismo de Stream Multi-GPU em Multi-Cores Inproceedings doi In: Anais da XXIV Escola Regional de Alto Desempenho da Região Sul, pp. 101-102, Sociedade Brasileira de Computação, Florianópolis, Brazil, 2024. @inproceedings{FIM:ERAD:24,
title = {Proposta de Paralelismo de Stream Multi-GPU em Multi-Cores },
author = {Gabriel Rustick Fim and Dalvan Griebler},
url = {https://doi.org/10.5753/eradrs.2024.238680},
doi = {10.5753/eradrs.2024.238680},
year = {2024},
date = {2024-04-01},
booktitle = {Anais da XXIV Escola Regional de Alto Desempenho da Região Sul},
pages = {101-102},
publisher = {Sociedade Brasileira de Computação},
address = {Florianópolis, Brazil},
abstract = {Considering the need for faster processing times, the use of multi-accelerator environments has become increasingly prominent in the literature. Unfortunately, programming for these kinds of environments presents a series of challenges that make developing code targeting multiple GPUs demand greater programming effort. We propose to investigate how to use C++ annotations to simplify multi-GPU code generation without compromising performance.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Considering the need for faster processing times, the use of multi-accelerator environments has become increasingly prominent in the literature. Unfortunately, programming for these kinds of environments presents a series of challenges that make developing code targeting multiple GPUs demand greater programming effort. We propose to investigate how to use C++ annotations to simplify multi-GPU code generation without compromising performance. |
| Leonarczyk, Ricardo; Griebler, Dalvan; Mencagli, Gabriele; Danelutto, Marco Evaluation of Adaptive Micro-batching Techniques for GPU-accelerated Stream Processing Inproceedings doi In: Euro-Par 2023: Parallel Processing Workshops, pp. 81-92, Springer, Limassol, Cyprus, 2024. @inproceedings{LEONARCZYK:Euro-ParW:23,
title = {Evaluation of Adaptive Micro-batching Techniques for GPU-accelerated Stream Processing},
author = {Ricardo Leonarczyk and Dalvan Griebler and Gabriele Mencagli and Marco Danelutto},
url = {https://doi.org/10.1007/978-3-031-50684-0_7},
doi = {10.1007/978-3-031-50684-0_7},
year = {2024},
date = {2024-04-01},
booktitle = {Euro-Par 2023: Parallel Processing Workshops},
pages = {81-92},
publisher = {Springer},
address = {Limassol, Cyprus},
series = {Euro-ParW'23},
abstract = {Stream processing plays a vital role in applications that require continuous, low-latency data processing. Thanks to their extensive parallel processing capabilities and relatively low cost, GPUs are well-suited to scenarios where such applications require substantial computational resources. However, micro-batching becomes essential for efficient GPU computation within stream processing systems, and finding appropriate batch sizes to maintain an adequate level of service is often challenging, particularly in cases where applications experience fluctuations in input rate and workload. Addressing this challenge requires adjusting the optimal batch size at runtime. This study proposes a methodology for evaluating different self-adaptive micro-batching strategies in a real-world complex streaming application used as a benchmark.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Stream processing plays a vital role in applications that require continuous, low-latency data processing. Thanks to their extensive parallel processing capabilities and relatively low cost, GPUs are well-suited to scenarios where such applications require substantial computational resources. However, micro-batching becomes essential for efficient GPU computation within stream processing systems, and finding appropriate batch sizes to maintain an adequate level of service is often challenging, particularly in cases where applications experience fluctuations in input rate and workload. Addressing this challenge requires adjusting the optimal batch size at runtime. This study proposes a methodology for evaluating different self-adaptive micro-batching strategies in a real-world complex streaming application used as a benchmark. |
 | Garcia, Adriano Marques; Griebler, Dalvan; Schepke, Claudio; García, José Daniel; Muñoz, Javier Fernández; Fernandes, Luiz Gustavo Performance and programmability of GrPPI for parallel stream processing on multi-cores Journal Article doi In: The Journal of Supercomputing, vol. 80, no. 9, pp. 12966-13000, 2024. @article{GARCIA:JS:24,
title = {Performance and programmability of GrPPI for parallel stream processing on multi-cores},
author = {Adriano Marques Garcia and Dalvan Griebler and Claudio Schepke and José Daniel García and Javier Fernández Muñoz and Luiz Gustavo Fernandes},
url = {https://doi.org/10.1007/s11227-024-05934-z},
doi = {10.1007/s11227-024-05934-z},
year = {2024},
date = {2024-02-01},
urldate = {2024-02-01},
journal = {The Journal of Supercomputing},
volume = {80},
number = {9},
pages = {12966-13000},
publisher = {Springer},
abstract = {The GrPPI library aims to simplify the burdensome task of parallel programming. It provides a unified, abstract, and generic layer while promising minimal overhead on performance. Although it supports stream parallelism, GrPPI lacks an evaluation regarding representative performance metrics for this domain, such as throughput and latency. This work evaluates GrPPI with a focus on parallel stream processing. We compare the throughput and latency performance, memory usage, and programmability of GrPPI against handwritten parallel code. For this, we use the benchmarking framework SPBench to build custom GrPPI benchmarks and benchmarks with handwritten parallel code using the same backends supported by GrPPI. The benchmarks are based on real applications, such as Lane Detection, Bzip2, Face Recognizer, and Ferret. Experiments show that while performance is often competitive with handwritten parallel code, the infeasibility of fine-tuning GrPPI is a crucial drawback for emerging applications. Despite this, programmability experiments estimate that GrPPI can potentially reduce the development time of parallel applications by about three times.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
The GrPPI library aims to simplify the burdensome task of parallel programming. It provides a unified, abstract, and generic layer while promising minimal overhead on performance. Although it supports stream parallelism, GrPPI lacks an evaluation regarding representative performance metrics for this domain, such as throughput and latency. This work evaluates GrPPI with a focus on parallel stream processing. We compare the throughput and latency performance, memory usage, and programmability of GrPPI against handwritten parallel code. For this, we use the benchmarking framework SPBench to build custom GrPPI benchmarks and benchmarks with handwritten parallel code using the same backends supported by GrPPI. The benchmarks are based on real applications, such as Lane Detection, Bzip2, Face Recognizer, and Ferret. Experiments show that while performance is often competitive with handwritten parallel code, the infeasibility of fine-tuning GrPPI is a crucial drawback for emerging applications. Despite this, programmability experiments estimate that GrPPI can potentially reduce the development time of parallel applications by about three times. |
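As an illustration of the programmability dimension, a GrPPI stream pipeline is typically expressed along the following lines. This is a sketch based on GrPPI's published examples; the header path, the optional type, and other details vary across GrPPI versions and should be treated as assumptions:

#include <grppi/grppi.h>   // umbrella header; the path may differ by version
#include <iostream>
#include <optional>

int main() {
    grppi::parallel_execution_native ex;  // native C++ threads backend
    int n = 0;
    grppi::pipeline(ex,
        // Stage 1: generator; an empty optional signals end-of-stream.
        [&n]() -> std::optional<int> {
            return n < 100 ? std::optional<int>{n++} : std::nullopt;
        },
        // Stage 2: per-item transformation.
        [](int x) { return x * x; },
        // Stage 3: sink.
        [](int x) { std::cout << x << '\n'; });
}

The high abstraction level visible here, where the policy object encapsulates backend details, is arguably also the source of the fine-tuning limitation noted in the abstract.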
 | Mencagli, Gabriele; Torquati, Massimo; Griebler, Dalvan; Fais, Alessandra; Danelutto, Marco General-purpose data stream processing on heterogeneous architectures with WindFlow Journal Article doi In: Journal of Parallel and Distributed Computing, vol. 184, pp. 104782, 2024. @article{MENCAGLI:JPDC:24,
title = {General-purpose data stream processing on heterogeneous architectures with WindFlow},
author = {Gabriele Mencagli and Massimo Torquati and Dalvan Griebler and Alessandra Fais and Marco Danelutto},
url = {https://doi.org/10.1016/j.jpdc.2023.104782},
doi = {10.1016/j.jpdc.2023.104782},
year = {2024},
date = {2024-02-01},
urldate = {2024-02-01},
journal = {Journal of Parallel and Distributed Computing},
volume = {184},
pages = {104782},
publisher = {Elsevier},
abstract = {Many emerging applications analyze data streams by running graphs of communicating tasks called operators. To develop and deploy such applications, Stream Processing Systems (SPSs) like Apache Storm and Flink have been made available to researchers and practitioners. They offer imperative or declarative programming interfaces to develop operators running arbitrary algorithms on structured or unstructured data streams. In this context, the interest in leveraging hardware acceleration with GPUs has become more pronounced in high-throughput use cases. Unfortunately, GPU acceleration has been studied only for relational operators working on structured streams, while non-relational operators have often been overlooked. This paper presents WindFlow, a library supporting the seamless GPU offloading of general partitioned-stateful operators, extending the range of operators that benefit from hardware acceleration. Its design provides high throughput while still exposing a high-level API to users, in contrast to the raw utilization of GPUs in Apache Flink.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Many emerging applications analyze data streams by running graphs of communicating tasks called operators. To develop and deploy such applications, Stream Processing Systems (SPSs) like Apache Storm and Flink have been made available to researchers and practitioners. They offer imperative or declarative programming interfaces to develop operators running arbitrary algorithms on structured or unstructured data streams. In this context, the interest in leveraging hardware acceleration with GPUs has become more pronounced in high-throughput use cases. Unfortunately, GPU acceleration has been studied only for relational operators working on structured streams, while non-relational operators have often been overlooked. This paper presents WindFlow, a library supporting the seamless GPU offloading of general partitioned-stateful operators, extending the range of operators that benefit from hardware acceleration. Its design provides high throughput while still exposing a high-level API to users, in contrast to the raw utilization of GPUs in Apache Flink. |
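The sketch below illustrates the builder-style, high-level API this kind of library exposes for composing a CPU pipeline; the builder names and functor signatures follow WindFlow's public documentation but may differ across versions, so treat them as assumptions rather than a definitive usage example.

```cpp
// WindFlow-style pipeline sketch. Builder names and functor signatures are
// assumptions based on WindFlow's public documentation and may differ
// across library versions.
#include <windflow.hpp>
#include <optional>

int main() {
  // Source: pushes 1000 integers into the stream, then ends it.
  auto source = wf::Source_Builder(
                    [](wf::Source_Shipper<int> &shipper) {
                      for (int i = 0; i < 1000; i++) shipper.push(i);
                    })
                    .withParallelism(1).withName("source").build();

  // Map: a CPU operator squaring each item, replicated 4 times.
  auto map = wf::Map_Builder([](int &x) { x = x * x; })
                 .withParallelism(4).withName("square").build();

  // Sink: consumes the resulting stream.
  auto sink = wf::Sink_Builder([](std::optional<int> &x) { /* consume */ })
                  .withParallelism(1).withName("sink").build();

  wf::PipeGraph graph("example", wf::Execution_Mode_t::DEFAULT,
                      wf::Time_Policy_t::INGRESS_TIME);
  graph.add_source(source).chain(map).chain_sink(sink);
  graph.run();  // blocks until the stream ends
}
```

The paper's contribution is that operators in this style, including general partitioned-stateful ones, can be offloaded to the GPU without changing the high-level structure of the program.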
 | Fischer, Gabriel Souto; Ramos, Gabriel Oliveira; Costa, Cristiano André; Alberti, Antonio Marcos; Griebler, Dalvan; Singh, Dhananjay; Righi, Rodrigo Rosa Multi-Hospital Management: Combining Vital Signs IoT Data and the Elasticity Technique to Support Healthcare 4.0 Journal Article doi In: IoT, vol. 5, no. 2, pp. 381-408, 2024. @article{FISCHER:IoT:24,
title = {Multi-Hospital Management: Combining Vital Signs IoT Data and the Elasticity Technique to Support Healthcare 4.0},
author = {Gabriel Souto Fischer and Gabriel Oliveira Ramos and Cristiano André Costa and Antonio Marcos Alberti and Dalvan Griebler and Dhananjay Singh and Rodrigo Rosa Righi},
url = {https://doi.org/10.3390/iot5020019},
doi = {10.3390/iot5020019},
year = {2024},
date = {2024-01-01},
urldate = {2024-01-01},
journal = {IoT},
volume = {5},
number = {2},
pages = {381-408},
publisher = {MDPI},
abstract = {Smart cities can improve the quality of life of citizens by optimizing the utilization of resources. In an IoT-connected environment, people's health can be constantly monitored, which can help identify medical problems before they become serious. However, overcrowded hospitals can lead to long waiting times for patients to receive treatment. The literature presents alternatives to address this problem by adjusting care capacity to demand. However, there is still a need for a solution that can adjust human resources in multiple healthcare settings, which is the reality of cities. This work introduces HealCity, a smart-city-focused model that can monitor patients’ use of healthcare settings and adapt the allocation of health professionals to meet their needs. HealCity uses vital signs (IoT) data in prediction techniques to anticipate when the demand for a given environment will exceed its capacity and suggests actions to allocate health professionals accordingly. Additionally, we introduce the concept of multilevel proactive human resources elasticity in smart cities, thus managing human resources at different levels of a smart city. An algorithm is also devised to automatically manage and identify the appropriate hospital for a possible future patient. Furthermore, some IoT deployment considerations are presented based on a hardware implementation for the proposed model. HealCity was evaluated with four hospital settings and obtained promising results: Compared to hospitals with rigid professional allocations, it reduced waiting time for care by up to 87.62%.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Smart cities can improve the quality of life of citizens by optimizing the utilization of resources. In an IoT-connected environment, people's health can be constantly monitored, which can help identify medical problems before they become serious. However, overcrowded hospitals can lead to long waiting times for patients to receive treatment. The literature presents alternatives to address this problem by adjusting care capacity to demand. However, there is still a need for a solution that can adjust human resources in multiple healthcare settings, which is the reality of cities. This work introduces HealCity, a smart-city-focused model that can monitor patients’ use of healthcare settings and adapt the allocation of health professionals to meet their needs. HealCity uses vital signs (IoT) data in prediction techniques to anticipate when the demand for a given environment will exceed its capacity and suggests actions to allocate health professionals accordingly. Additionally, we introduce the concept of multilevel proactive human resources elasticity in smart cities, thus managing human resources at different levels of a smart city. An algorithm is also devised to automatically manage and identify the appropriate hospital for a possible future patient. Furthermore, some IoT deployment considerations are presented based on a hardware implementation for the proposed model. HealCity was evaluated with four hospital settings and obtained promising results: Compared to hospitals with rigid professional allocations, it reduced waiting time for care by up to 87.62%. |
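As a toy illustration of the proactive elasticity idea (not HealCity's actual algorithm), the sketch below extrapolates short-horizon demand for a care environment and suggests extra professionals before capacity is exceeded; the types, the linear forecast, and the per-professional capacity parameter are all assumptions for illustration.

```cpp
#include <cstddef>
#include <vector>

// Illustrative proactive-elasticity sketch (not HealCity's algorithm):
// compare a short-horizon demand forecast against current capacity and
// suggest staff reallocation before the environment saturates.
struct Environment {
    double capacity;               // patients/hour the current staff can serve
    std::vector<double> arrivals;  // recent arrival-rate observations
};

// Linear extrapolation of the last two observations as the forecast.
double forecast(const Environment &e) {
    std::size_t n = e.arrivals.size();
    if (n < 2) return n ? e.arrivals.back() : 0.0;
    return e.arrivals[n - 1] + (e.arrivals[n - 1] - e.arrivals[n - 2]);
}

// Extra professionals to allocate (0 = no action), assuming each
// professional adds 'perStaff' patients/hour of capacity.
int suggestExtraStaff(const Environment &e, double perStaff) {
    double deficit = forecast(e) - e.capacity;
    return deficit > 0 ? static_cast<int>(deficit / perStaff) + 1 : 0;
}
```

HealCity's multilevel variant applies this kind of decision at several scopes of a smart city (room, hospital, city), using vital-signs IoT data rather than raw arrival counts as the prediction input.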
2023
|
| Hoffmann, Renato Barreto; Faé, Leonardo; Manssour, Isabel; Griebler, Dalvan Analyzing C++ Stream Parallelism in Shared-Memory when Porting to Flink and Storm Inproceedings doi In: International Symposium on Computer Architecture and High Performance Computing Workshops (SBAC-PADW), pp. 1-8, IEEE, Porto Alegre, Brazil, 2023. @inproceedings{HOFFMANN:SBAC-PADW:23,
title = {Analyzing C++ Stream Parallelism in Shared-Memory when Porting to Flink and Storm},
author = {Renato Barreto Hoffmann and Leonardo Faé and Isabel Manssour and Dalvan Griebler},
url = {https://doi.org/10.1109/SBAC-PADW60351.2023.00017},
doi = {10.1109/SBAC-PADW60351.2023.00017},
year = {2023},
date = {2023-10-01},
booktitle = {International Symposium on Computer Architecture and High Performance Computing Workshops (SBAC-PADW)},
pages = {1-8},
publisher = {IEEE},
address = {Porto Alegre, Brazil},
series = {SBAC-PADW'23},
abstract = {Stream processing plays a crucial role in various information-oriented digital systems. Two popular frameworks for real-time data processing, Flink and Storm, provide solutions for effective parallel stream processing in Java. An option to leverage Java's mature ecosystem for distributed stream processing involves porting legacy C++ applications to Java. However, this raises concerns about the adequacy of the equivalent Java mechanisms and about potential throughput degradation. Therefore, our objective is to evaluate programmability and performance when converting stream processing applications from C++ to Java while also exploring the parallelization capabilities offered by Flink and Storm. Furthermore, we aim to assess the throughput of Flink and Storm on shared-memory manycore machines, a hardware architecture commonly found in cloud environments. To achieve this, we conduct experiments involving four different stream processing applications. We highlight challenges encountered when porting C++ to Java and working with Flink and Storm, and we discuss throughput, latency, CPU, and memory usage results.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Stream processing plays a crucial role in various information-oriented digital systems. Two popular frameworks for real-time data processing, Flink and Storm, provide solutions for effective parallel stream processing in Java. An option to leverage Java's mature ecosystem for distributed stream processing involves porting legacy C++ applications to Java. However, this raises concerns about the adequacy of the equivalent Java mechanisms and about potential throughput degradation. Therefore, our objective is to evaluate programmability and performance when converting stream processing applications from C++ to Java while also exploring the parallelization capabilities offered by Flink and Storm. Furthermore, we aim to assess the throughput of Flink and Storm on shared-memory manycore machines, a hardware architecture commonly found in cloud environments. To achieve this, we conduct experiments involving four different stream processing applications. We highlight challenges encountered when porting C++ to Java and working with Flink and Storm, and we discuss throughput, latency, CPU, and memory usage results. |
| Andrade, Gabriella; Griebler, Dalvan; Santos, Rodrigo; Fernandes, Luiz Gustavo Extending the Planning Poker Method to Estimate the Development Effort of Parallel Applications Inproceedings doi In: Anais do XXIII Simpósio em Sistemas Computacionais de Alto Desempenho (WSCAD), pp. 181-192, SBC, Porto Alegre, Brasil, 2023. @inproceedings{ANDRADE:WSCAD:23,
title = {Extending the Planning Poker Method to Estimate the Development Effort of Parallel Applications},
author = {Gabriella Andrade and Dalvan Griebler and Rodrigo Santos and Luiz Gustavo Fernandes},
url = {https://doi.org/10.5753/wscad.2023.235925},
doi = {10.5753/wscad.2023.235925},
year = {2023},
date = {2023-10-01},
booktitle = {Anais do XXIII Simpósio em Sistemas Computacionais de Alto Desempenho (WSCAD)},
pages = {181-192},
publisher = {SBC},
address = {Porto Alegre, Brasil},
abstract = {Since different Parallel Programming Interfaces (PPIs) are available to programmers, evaluating them to identify the most suitable one has also become necessary. Recently, in addition to the performance of PPIs, researchers in parallel processing have also evaluated developers' productivity. Some researchers conduct empirical studies involving people for productivity evaluation, which is time-consuming. Aiming at a less costly method for evaluating the development effort of parallel applications, in this paper we propose a modification of the Planning Poker method. We consider a representative set of parallel stream processing applications to evaluate the proposed modification. Our results show that the proposed method requires less effort in practical use than controlled experiments with students.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Since different Parallel Programming Interfaces (PPIs) are available to programmers, evaluating them to identify the most suitable one has also become necessary. Recently, in addition to the performance of PPIs, researchers in parallel processing have also evaluated developers' productivity. Some researchers conduct empirical studies involving people for productivity evaluation, which is time-consuming. Aiming at a less costly method for evaluating the development effort of parallel applications, in this paper we propose a modification of the Planning Poker method. We consider a representative set of parallel stream processing applications to evaluate the proposed modification. Our results show that the proposed method requires less effort in practical use than controlled experiments with students. |
| Alf, Lucas; Hoffmann, Renato Barreto; Müller, Caetano; Griebler, Dalvan Análise da Execução de Algoritmos de Aprendizado de Máquina em Dispositivos Embarcados Inproceedings doi In: Anais do XXIII Simpósio em Sistemas Computacionais de Alto Desempenho (WSCAD), pp. 61-72, SBC, Porto Alegre, Brasil, 2023. @inproceedings{ALF:WSCAD:23,
title = {Análise da Execução de Algoritmos de Aprendizado de Máquina em Dispositivos Embarcados},
author = {Lucas Alf and Renato Barreto Hoffmann and Caetano Müller and Dalvan Griebler},
url = {https://doi.org/10.5753/wscad.2023.235915},
doi = {10.5753/wscad.2023.235915},
year = {2023},
date = {2023-10-01},
booktitle = {Anais do XXIII Simpósio em Sistemas Computacionais de Alto Desempenho (WSCAD)},
pages = {61-72},
publisher = {SBC},
address = {Porto Alegre, Brasil},
abstract = {Advances in IoT motivate the use of machine learning algorithms on embedded devices. However, these algorithms demand a considerable amount of computational resources. The goal of this work was to analyze machine learning algorithms on embedded devices, using CPU and GPU parallelism, in order to understand which hardware and software characteristics perform best in terms of energy consumption, inferences per second, and accuracy. We evaluated three Convolutional Neural Network models, as well as traditional algorithms and classification and regression neural networks. The experiments showed that PyTorch achieved the best performance for the CNN models and for the classification and regression neural networks when using the GPU, while Keras performed better when using only the CPU.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Advances in IoT motivate the use of machine learning algorithms on embedded devices. However, these algorithms demand a considerable amount of computational resources. The goal of this work was to analyze machine learning algorithms on embedded devices, using CPU and GPU parallelism, in order to understand which hardware and software characteristics perform best in terms of energy consumption, inferences per second, and accuracy. We evaluated three Convolutional Neural Network models, as well as traditional algorithms and classification and regression neural networks. The experiments showed that PyTorch achieved the best performance for the CNN models and for the classification and regression neural networks when using the GPU, while Keras performed better when using only the CPU. |
| Bianchessi, Arthur S.; Mallmann, Leonardo; Hoffmann, Renato Barreto; Griebler, Dalvan Conversão do NAS Parallel Benchmarks para C++ Standard Inproceedings doi In: Anais do XXIII Simpósio em Sistemas Computacionais de Alto Desempenho (WSCAD), pp. 313-324, SBC, Porto Alegre, Brasil, 2023. @inproceedings{BIANCHESSI:WSCAD:23,
title = {Conversão do NAS Parallel Benchmarks para C++ Standard},
author = {Arthur S. Bianchessi and Leonardo Mallmann and Renato Barreto Hoffmann and Dalvan Griebler},
url = {https://doi.org/10.5753/wscad.2023.235913},
doi = {10.5753/wscad.2023.235913},
year = {2023},
date = {2023-10-01},
booktitle = {Anais do XXIII Simpósio em Sistemas Computacionais de Alto Desempenho (WSCAD)},
pages = {313-324},
publisher = {SBC},
address = {Porto Alegre, Brasil},
abstract = {The C++ language gained new parallelism abstractions with the definition of execution policies for the standard library algorithms. However, the suitability and performance of this alternative still need to be studied in comparison with other well-established alternatives. Therefore, the goal of this work was to explore the wide range of features offered by the C++ standard library, evaluating their applicability and performance on five NPB kernels. Experiments in a multithreaded environment showed that incorporating standard library data structures, as well as the multidimensional access abstraction we created, has no notable impact on execution time. The algorithms with parallel execution policies, however, showed a statistically significant performance loss.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
The C++ language gained new parallelism abstractions with the definition of execution policies for the standard library algorithms. However, the suitability and performance of this alternative still need to be studied in comparison with other well-established alternatives. Therefore, the goal of this work was to explore the wide range of features offered by the C++ standard library, evaluating their applicability and performance on five NPB kernels. Experiments in a multithreaded environment showed that incorporating standard library data structures, as well as the multidimensional access abstraction we created, has no notable impact on execution time. The algorithms with parallel execution policies, however, showed a statistically significant performance loss. |
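For context, the parallelism abstraction evaluated here is the execution-policy argument that C++17 added to the standard algorithms; a minimal, self-contained example of the construct:

```cpp
#include <algorithm>
#include <execution>
#include <numeric>
#include <vector>

// The same standard algorithms, switched from sequential to parallel
// execution solely by the execution-policy argument.
int main() {
    std::vector<double> v(1 << 20, 1.0);

    // Parallel (and vectorization-friendly) element-wise transform.
    std::transform(std::execution::par_unseq, v.begin(), v.end(), v.begin(),
                   [](double x) { return x * 2.0; });

    // Parallel reduction over the result.
    double sum = std::reduce(std::execution::par, v.begin(), v.end(), 0.0);
    (void)sum;
}
```

With GCC's libstdc++, these policies are typically backed by Intel TBB, so such code is normally compiled with -std=c++17 and linked with -ltbb.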
| Faé, Leonardo; Hoffmann, Renato Barreto; Griebler, Dalvan Source-to-Source Code Transformation on Rust for High-Level Stream Parallelism Inproceedings doi In: XXVII Brazilian Symposium on Programming Languages (SBLP), pp. 41-49, ACM, Campo Grande, Brazil, 2023. @inproceedings{FAE:SBLP:23,
title = {Source-to-Source Code Transformation on Rust for High-Level Stream Parallelism},
author = {Leonardo Faé and Renato Barreto Hoffmann and Dalvan Griebler},
url = {https://doi.org/10.1145/3624309.3624320},
doi = {10.1145/3624309.3624320},
year = {2023},
date = {2023-09-01},
booktitle = {XXVII Brazilian Symposium on Programming Languages (SBLP)},
pages = {41-49},
publisher = {ACM},
address = {Campo Grande, Brazil},
series = {SBLP'23},
abstract = {Utilizing parallel systems to their full potential can be challenging for general-purpose developers. A solution to this problem is to create high-level abstractions using Domain-Specific Languages (DSLs). We create a stream-processing DSL for Rust, a growing programming language focused on performance and safety. To that end, we explore Rust's macros as a high-level abstraction tool to support an existing DSL named SPar and to perform source-to-source code transformations on the abstract syntax tree. We aim to assess the Rust source-to-source code transformation toolset and its implications. We highlight that Rust macros are powerful tools for performing source-to-source code transformations for abstracting structured stream processing. In addition, execution time and programmability results are comparable to other solutions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Utilizing parallel systems to their full potential can be challenging for general-purpose developers. A solution to this problem is to create high-level abstractions using Domain-Specific Languages (DSLs). We create a stream-processing DSL for Rust, a growing programming language focused on performance and safety. To that end, we explore Rust's macros as a high-level abstraction tool to support an existing DSL named SPar and to perform source-to-source code transformations on the abstract syntax tree. We aim to assess the Rust source-to-source code transformation toolset and its implications. We highlight that Rust macros are powerful tools for performing source-to-source code transformations for abstracting structured stream processing. In addition, execution time and programmability results are comparable to other solutions. |
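For readers unfamiliar with the source DSL, SPar expresses stream parallelism in C++ through standard attribute annotations that its compiler turns into parallel runtime code; the fragment below follows SPar's published annotation syntax, while the surrounding frame-processing code is an illustrative stand-in rather than an example from the paper.

```cpp
// SPar annotation sketch in C++ (attribute names follow SPar's published
// syntax; Frame, read_frame, filter, and write_frame are hypothetical
// stand-ins). The SPar compiler turns the annotations into parallel code.
#include <vector>

struct Frame {
    std::vector<unsigned char> px;
    bool empty() const { return px.empty(); }
};
Frame read_frame();              // hypothetical I/O helpers
Frame filter(Frame);
void write_frame(const Frame &);

void process_stream() {
    [[spar::ToStream]]
    while (true) {
        Frame f = read_frame();
        if (f.empty()) break;

        // Replicated stage: four workers filter frames concurrently.
        [[spar::Stage, spar::Input(f), spar::Output(f), spar::Replicate(4)]]
        { f = filter(f); }

        // Final single-worker stage consumes the frames.
        [[spar::Stage, spar::Input(f)]]
        { write_frame(f); }
    }
}
```

The paper's contribution is reproducing this annotation-driven transformation in Rust, with procedural macros playing the role that the annotation-aware compiler plays in C++.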
| Faé, Leonardo; Griebler, Dalvan; Manssour, Isabel Benchmarking da Aplicação de Comparação de Similaridade entre Imagens com Flink, Storm e SPar Inproceedings doi In: Anais da XXIII Escola Regional de Alto Desempenho da Região Sul, pp. 93-96, Sociedade Brasileira de Computação, Porto Alegre, Brazil, 2023. @inproceedings{FAE:ERAD:23,
title = {Benchmarking da Aplicação de Comparação de Similaridade entre Imagens com Flink, Storm e SPar},
author = {Leonardo Faé and Dalvan Griebler and Isabel Manssour},
url = {https://doi.org/10.5753/eradrs.2023.229258},
doi = {10.5753/eradrs.2023.229258},
year = {2023},
date = {2023-05-01},
booktitle = {Anais da XXIII Escola Regional de Alto Desempenho da Região Sul},
pages = {93-96},
publisher = {Sociedade Brasileira de Computação},
address = {Porto Alegre, Brazil},
abstract = {This work presents performance comparisons among the SPar, Apache Flink, and Apache Storm programming interfaces for the execution of an image similarity comparison application. The results reveal that the SPar versions deliver superior performance when executed with a large number of threads, in terms of both latency and throughput (SPar achieves roughly 5 times higher throughput with 40 workers).},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
This work presents performance comparisons among the SPar, Apache Flink, and Apache Storm programming interfaces for the execution of an image similarity comparison application. The results reveal that the SPar versions deliver superior performance when executed with a large number of threads, in terms of both latency and throughput (SPar achieves roughly 5 times higher throughput with 40 workers). |