@misc{11642,
  author = {Tanja Gruschke and Magne J{\o}rgensen},
  title = {How Much Does Feedback and Performance Review Improve Software Development Effort Estimation? An Empirical Study (Extended Abstract)},
  abstract = {Over-optimistic and over-confident software development effort estimates are more the rule than the exception. Several software process improvement frameworks, e.g., the Capability Maturity Model, are based on the assumption that improved feedback and the use of performance reviews lead to a higher degree of realism and better project performance. This paper investigates that assumption, i.e., whether effort estimation and uncertainty assessment skills improve with better feedback processes and mandatory estimation performance reviews. We recruited 20 professional software developers with the necessary technological experience and skill to complete the same 5 software development tasks on an in-use web system. They were paid a standard industry wage corresponding to their level of experience and expertise, and the work conditions were similar to those in industry. Ten of the participants received outcome feedback on their estimation performance and followed our estimation performance review instructions; the other ten acted as a control group with no performance review instructions and only on-the-job feedback. We found no or only minor differences in performance between the treatment groups and conclude that the feedback and performance reviews did not lead to improved estimation or uncertainty assessment skills. Possible reasons for this surprising finding include: a) low motivation for learning (high performance on effort estimation and uncertainty assessment may have been perceived as a much less important goal than being perceived as a skilled programmer through low effort estimates and high confidence), and b) the software developers who were instructed to learn from previous performance may have believed that they had learned more than they actually did. In a follow-up experiment, 81 software professionals assessed the uncertainty of the effort estimates provided by the developers in the original experiment. We found that the software professionals assessing the uncertainty of other developers{\textquoteright} effort estimates achieved much higher realism and learned better from history.},
  year = {2006},
  journal = {In International Symposium on Forecasting, edited by Antonio Garc{\'\i}a-Ferrer, Santander, 12-14 June 2006. International Institute of Forecasters, page 103},
}