@inproceedings{Hummert2024DeepFAVIB,
  author = {M. Hummert and S. Hassanpour and D. W\"{u}bben and A. Dekorsy},
  year = {2024},
  month = {Jun},
  title = {Deep FAVIB: Deep Learning-Based Forward-Aware Quantization via Information Bottleneck Method},
  url = {https://icc2024.ieee-icc.org/},
  address = {Denver, CO, USA},
  abstract = {We focus on a (generic) joint source-channel coding problem that appears in a broad variety of real-world applications. Explicitly, a noisy observation of a user/source signal must be compressed before being forwarded over an error-prone and rate-limited channel to a remote processing unit. The design problem is formulated such that the impacts of the forward link are taken into account. Aligned with the Information Bottleneck (IB) method, we consider the Mutual Information (MI) as the fidelity criterion and develop a data-driven approach to tackle the underlying design problem based upon a finite sample set. To this end, we derive a tractable variational lower bound of the objective functional and present a general learning architecture that optimizes this lower bound by standard training of the encoder and decoder Deep Neural Networks. This approach, which is principally based upon (generative) latent variable models, extends the concepts of the Variational AutoEncoder (VAE) and the Deep Variational Information Bottleneck (Deep VIB) from (remote) source coding to the context of joint source-channel coding. We validate the effectiveness of our approach by several numerical simulations over typical transmission scenarios.},
  booktitle = {IEEE International Conference on Communications (ICC)}
}