@article{gracla2023robust,
  author = {Steffen Gracla and Edgar Beck and Carsten Bockelmann and Armin Dekorsy},
  year = {2023},
  month = jan,
  title = {Robust Deep Reinforcement Learning Scheduling via Weight Anchoring},
  volume = {27},
  number = {1},
  publisher = {IEEE},
  pages = {210--213},
  url = {https://www.comsoc.org/publications/journals/ieee-comml},
  abstract = {Questions remain on the robustness of data-driven learning methods when crossing the gap from simulation to reality. We utilize weight anchoring, a method known from continual learning, to cultivate and fixate desired behavior in neural networks. Weight anchoring may be used to find a solution to a learning problem that is near the solution of another learning problem. Thereby, learning can be carried out in optimal environments without neglecting or unlearning desired behavior. We demonstrate this approach on the example of learning mixed QoS-efficient discrete resource scheduling with infrequent priority messages. Results show that this method provides performance comparable to the state of the art of augmenting a simulation environment, alongside significantly increased robustness and steerability.},
  journal = {IEEE Communications Letters}
}
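
Note (ignored by BibTeX, since it sits outside the entry): a minimal sketch of
the weight anchoring idea described in the abstract, i.e. training on a new
task while a quadratic penalty keeps the network near a previously anchored
solution. The plain L2 penalty, the toy model, and the strength lam are
assumptions for illustration, not the authors' implementation; related
continual learning methods weight this penalty, e.g. by Fisher information.

import torch
import torch.nn as nn

model = nn.Linear(4, 2)  # stand-in for the scheduling network
# Snapshot the weights of the behavior we want to keep (the anchor).
anchor = [p.detach().clone() for p in model.parameters()]
opt = torch.optim.SGD(model.parameters(), lr=0.01)
lam = 1.0  # anchoring strength (assumed value)

x, y = torch.randn(8, 4), torch.randn(8, 2)  # dummy batch for the new task
task_loss = nn.functional.mse_loss(model(x), y)
# Quadratic distance to the anchored weights keeps the new solution nearby,
# so learning in the new environment does not unlearn the anchored behavior.
penalty = sum(((p - a) ** 2).sum() for p, a in zip(model.parameters(), anchor))
loss = task_loss + lam * penalty
opt.zero_grad()
loss.backward()
opt.step()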