@inproceedings{15579,
  author    = {Dai, Y. and Xu, D. and Zhang, K. and Lu, Y. and Maharjan, Sabita and Zhang, Yan},
  title     = {Deep Reinforcement Learning for Edge Computing and Resource Allocation in {5G} Beyond},
  abstract  = {By extending computation capacity to the edge of wireless networks, edge computing has the potential to enable computation-intensive and delay-sensitive applications in 5G and beyond via computation offloading. However, in multi-user heterogeneous networks, it is challenging to capture complete network information, such as wireless channel state, available bandwidth or computation resources. The strong couplings among devices on application requirements or radio access mode make it more difficult to design an optimal computation offloading scheme. Deep Reinforcement Learning (DRL) is an emerging technique to address such an issue with limited and less accurate network information. In this paper, we utilize DRL to design an optimal computation offloading and resource allocation strategy for minimizing system energy consumption. We first present a multi-user edge computing framework in heterogeneous networks. Then, we formulate the joint computation offloading and resource allocation problem as a DRL form and propose a new DRL-inspired algorithm to minimize system energy consumption. Numerical results based on a real-world dataset demonstrate the effectiveness of our proposed algorithm, compared to two benchmark solutions.},
  booktitle = {2019 IEEE 19th International Conference on Communication Technology (ICCT)},
  year      = {2019},
  pages     = {866--870},
  publisher = {IEEE},
}