@article{16463,
  keywords  = {software security, vulnerability prediction, model interpretability, identifier splitting, source code vocabulary},
  author    = {David Binkley and Leon Moonen and Sibren Isaacman},
  title     = {Featherweight Assisted Vulnerability Discovery},
  abstract  = {Predicting vulnerable source code helps to focus the attention of a developer, or a program analysis technique, on those parts of the code that need to be examined with more scrutiny. Recent work proposed the use of function names as semantic cues that can be learned by a deep neural network (DNN) to aid in the hunt for vulnerable functions. Combining identifier splitting, which we use to split each function name into its constituent words, with a novel frequency-based algorithm, we explore the extent to which the words that make up a function{\textquoteright}s name can be used to predict potentially vulnerable functions. In contrast to the lightweight prediction provided by a DNN considering only function names, avoiding the need for a DNN provides featherweight prediction. The underlying idea is that function names that contain certain {\textquotedblleft}dangerous{\textquotedblright} words are more likely to accompany vulnerable functions. Of course, this assumes that the frequency-based algorithm can be properly tuned to focus on truly dangerous words. Because it is more transparent than a DNN, which behaves as a {\textquotedblleft}black box{\textquotedblright} and thus provides no insight into the rationale underlying its decisions, the frequency-based algorithm enables us to investigate the inner workings of the DNN. If successful, this investigation into what the DNN does and does not learn will help us train more effective future models. We empirically evaluate our approach on a heterogeneous dataset containing over 73,000 functions labeled vulnerable and over 950,000 functions labeled benign. Our analysis shows that words alone account for a significant portion of the DNN{\textquoteright}s classification ability. We also find that words are of greatest value in datasets with a more homogeneous vocabulary. Thus, when working within the scope of a given project, where the vocabulary is unavoidably homogeneous, our approach provides a cheaper, potentially complementary technique to aid in the hunt for source-code vulnerabilities. Finally, this approach has the advantage that it is viable with orders of magnitude less training data.},
  year      = {2022},
  journal   = {Information and Software Technology},
  volume    = {146},
  pages     = {106844},
  month     = jun,
  publisher = {Elsevier},
  issn      = {0950-5849},
  url       = {https://www.sciencedirect.com/science/article/pii/S0950584922000209},
  doi       = {10.1016/j.infsof.2022.106844},
}