@inproceedings{1172,
  author    = {Sean Stapleton and Yashmeet Gambhir and Alexander LeClair and Zachary Eberhart and Westley Weimer and Kevin Leach and Yu Huang},
  title     = {A Human Study of Comprehension and Code Summarization},
  abstract  = {Software developers spend a great deal of time reading and understanding code that is poorly documented, written by other developers, or developed using differing styles. During the past decade, researchers have investigated techniques for automatically documenting code to improve comprehensibility. In particular, recent advances in deep learning have led to sophisticated summary generation techniques that convert functions or methods to simple English strings that succinctly describe that code's behavior. However, automatic summarization techniques are assessed using internal metrics such as BLEU scores, which measure natural language properties in translational models, or ROUGE scores, which measure overlap with human-written text. Unfortunately, these metrics do not necessarily capture how machine-generated code summaries actually affect human comprehension or developer productivity. We conducted a human study involving both university students and professional developers (n = 45). Participants reviewed Java methods and summaries and answered established program comprehension questions. In addition, participants completed coding tasks given summaries as specifications. Critically, the experiment controlled the source of the summaries: for a given method, some participants were shown human-written text and some were shown machine-generated text. We found that participants performed significantly better (p = 0.029) using human-written summaries versus machine-generated summaries. However, we found no evidence to support that participants perceive human- and machine-generated summaries to have different qualities. In addition, participants' performance showed no correlation with the BLEU and ROUGE scores often used to assess the quality of machine-generated summaries. These results suggest a need for revised metrics to assess and guide automatic summarization techniques.},
  year      = {2020},
  booktitle = {Proceedings of the 28th International Conference on Program Comprehension},
  pages     = {2--13},
  month     = jul,
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  isbn      = {9781450379588},
  url       = {https://doi.org/10.1145/3387904.3389258},
  doi       = {10.1145/3387904.3389258},
}