@article{8693,
  author    = {Magne J{\o}rgensen and Tore Dyb{\aa} and Knut Liest{\o}l and Dag Sj{\o}berg},
  title     = {Incorrect Results in Software Engineering Experiments: How to Improve Research Practices},
  abstract  = {Context: The trustworthiness of research results is a growing concern in many empirical disciplines. Aim: The goals of this paper are to assess how much the trustworthiness of results reported in software engineering experiments is affected by researcher and publication bias and to suggest improved research practices. Method: First, we conducted a small-scale survey to document the presence of researcher and publication biases in software engineering experiments. Then, we built a model that estimates the proportion of correct results for different levels of researcher and publication bias. A review of 150 randomly selected software engineering experiments published in the period 2002--2012 was conducted to provide input to the model. Results: The survey indicates that researcher and publication bias is quite common. This finding is supported by the observation that the actual proportion of statistically significant results reported in the reviewed papers was about twice as high as the one expected assuming no researcher and publication bias. Our models suggest a high proportion of incorrect results even with quite conservative assumptions. Conclusion: Research practices must improve to increase the trustworthiness of software engineering experiments. A key to this improvement is to avoid conducting studies with unsatisfactorily low statistical power.},
  year      = {2016},
  journal   = {Journal of Systems and Software},
  volume    = {117},
  pages     = {274--281},
  publisher = {Elsevier},
}