@inproceedings{beck2024modelfree,
  author = {E. Beck and C. Bockelmann and A. Dekorsy},
  year = {2024},
  month = {May},
  title = {Model-free Reinforcement Learning of Semantic Communication by Stochastic Policy Gradient},
  volume = {1},
  publisher = {IEEE},
  pages = {7},
  url = {https://icmlcn2024.ieee-icmlcn.org/},
  abstract = {Following the recent success of Machine Learning tools in wireless communications, the idea of semantic communication by Weaver from 1949 has gained attention. It breaks with Shannon's classic design paradigm by aiming to transmit the meaning, i.e., semantics, of a message instead of its exact version, allowing for information rate savings. In this work, we apply the Stochastic Policy Gradient (SPG) to design a semantic communication system by reinforcement learning, separating transmitter and receiver, and not requiring a known or differentiable channel model -- a crucial step towards deployment in practice. Further, we derive the use of SPG for both classic and semantic communication from the maximization of the mutual information between received and target variables. Numerical results show that our approach achieves comparable performance to a model-aware approach based on the reparametrization trick, albeit with a decreased convergence rate.},
  booktitle = {IEEE International Conference on Machine Learning for Communication and Networking (ICMLCN 2024)}
}
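
A minimal, hypothetical sketch (not from the paper) of the score-function / REINFORCE-style stochastic policy gradient idea summarized in the abstract above: a stochastic transmitter is trained from rewards alone, with the channel treated as a non-differentiable black box. The toy BPSK-like link, the reward, and all names (tx_mean, black_box_channel, receiver) and hyperparameters are assumptions for illustration only, written in Python/NumPy.

# Illustrative sketch only, not the authors' code or setup.
import numpy as np

rng = np.random.default_rng(0)

# Transmitter policy: for bit b, transmit x ~ N(tx_mean[b], SIGMA^2).
# Sampling from this stochastic policy is what enables the score-function gradient.
tx_mean = np.array([-0.1, 0.1])    # learnable constellation means, poor initialization
SIGMA = 0.3                        # fixed exploration std of the Gaussian policy
LR = 0.05                          # step size
NOISE_STD = 0.5                    # channel noise level (unknown to the learner)

def black_box_channel(x):
    """Treated as non-differentiable: the learner only sees its output samples."""
    return x + NOISE_STD * rng.standard_normal(x.shape)

def receiver(y):
    """Hard-decision receiver: decide bit 1 if y > 0, else bit 0."""
    return (y > 0).astype(int)

for step in range(2000):
    bits = rng.integers(0, 2, size=256)                 # random payload bits
    mean = tx_mean[bits]
    x = mean + SIGMA * rng.standard_normal(bits.shape)  # sample transmit symbols from the policy
    y = black_box_channel(x)                            # no gradient is taken through this call
    reward = (receiver(y) == bits).astype(float)        # 1 if the bit was detected correctly
    baseline = reward.mean()                            # simple variance-reduction baseline
    # Score function of the Gaussian policy: d/d(mean) log N(x; mean, SIGMA^2) = (x - mean) / SIGMA^2
    score = (x - mean) / SIGMA**2
    grad = (reward - baseline) * score                  # per-sample policy gradient estimate
    for b in (0, 1):                                    # ascend the estimated expected reward
        mask = bits == b
        if mask.any():
            tx_mean[b] += LR * grad[mask].mean()

print("learned transmit means:", tx_mean)               # the two means should drift apart

Because no gradient flows through black_box_channel, the same update rule would apply if the channel were a real, unmodeled hardware link, which is the practical point emphasized in the abstract.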