Given the increasing application of Large Language Models (LLMs) to natural language tasks, this paper presents preliminary findings on a verification component for detecting hallucinations in an LLM that translates natural language questions into SPARQL queries. We propose a logic-based deductive verification of the generated SPARQL query: we check whether the deep semantic representation of the original natural language question entails the semantic representation of the SPARQL query.
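As a rough illustration of the verification idea, the sketch below pairs a cheap syntactic check (using rdflib's actual SPARQL parser) with the entailment-based acceptance test. The helpers `nl_to_logic`, `sparql_to_logic`, and `entails` are hypothetical stand-ins for the paper's semantic-representation and deduction machinery, not the authors' implementation.

```python
# Minimal sketch of the verification pipeline; nl_to_logic, sparql_to_logic,
# and entails are hypothetical placeholders, not the paper's actual code.
from rdflib.plugins.sparql import prepareQuery


def is_well_formed(query: str) -> bool:
    """Cheap first check: does the generated query even parse as SPARQL?"""
    try:
        prepareQuery(query)
        return True
    except Exception:
        return False


def nl_to_logic(question: str):
    """Placeholder: map the NL question to a deep semantic representation."""
    raise NotImplementedError


def sparql_to_logic(query: str):
    """Placeholder: map the SPARQL query to a comparable logical form."""
    raise NotImplementedError


def entails(premise, hypothesis) -> bool:
    """Placeholder: deductive entailment check, e.g. via a theorem prover."""
    raise NotImplementedError


def verify(question: str, generated_sparql: str) -> bool:
    """Accept the query only if it parses and the question's semantics
    entail the query's semantics; otherwise flag a possible hallucination."""
    return is_well_formed(generated_sparql) and entails(
        nl_to_logic(question), sparql_to_logic(generated_sparql)
    )
```

The design mirrors the paper's framing: entailment runs from question to query, so a query that asks for more than the question licenses fails verification and is flagged as a hallucination.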
@inproceedings{rademaker-etal-2024-deductive,
    title = {Deductive Verification of {LLM} Generated {SPARQL} Queries},
    author = {Rademaker, Alexandre and Lima, Guilherme and Fiorini, Sandro Rama and da Silva, Viviane Torres},
    editor = {S{\'e}rasset, Gilles and Oliveira, Hugo Gon{\c{c}}alo and Oleskeviciene, Giedre Valunaite},
    booktitle = {Proceedings of the Workshop on Deep Learning and Linked Data (DLnLD) @ LREC-COLING 2024},
    month = may,
    year = {2024},
    address = {Torino, Italia},
    publisher = {ELRA and ICCL},
    url = {https://aclanthology.org/2024.dlnld-1.4},
    pages = {45--52}
}