@inproceedings{c05c0c341c504431b98caec3892806c7,
title = "SCFormer: Integrating Hybrid Features in Vision Transformers",
abstract = "Hybrid modules that combine self-attention and convolution operations can benefit from the advantages of both, and consequently achieve higher performance than either operation alone. However, current hybrid modules do not capitalize directly on the intrinsic relation between self-attention and convolution, but rather introduce external mechanisms that come with increased computation cost. In this paper, we propose a new hybrid vision transformer called Shift and Concatenate Transformer (SCFormer), which benefits from the intrinsic relationship between convolution and self-attention. SCFormer is built on the Shift and Concatenate Attention (SCA) block, which integrates convolution and self-attention features. We propose a shifting mechanism and corresponding aggregation rules for the feature integration of SCA blocks, such that the generated features more closely approximate the optimal output features. Extensive experiments show that, with comparable computational complexity, SCFormer consistently achieves improved results over competitive baselines on image recognition and downstream tasks. Our code is available at: https://github.com/hotfinda/SCFormer.",
keywords = "Vision transformer, feature integration, hybrid module",
author = "Hui Lu and Ronald Poppe and Albert Salah",
note = "Funding Information: This work is supported in part by a scholarship from the China Scholarship Council (CSC) under Grant No. 202106290068. Publisher Copyright: {\textcopyright} 2023 IEEE.",
year = "2023",
doi = "10.1109/ICME55011.2023.00323",
language = "English",
series = "Proceedings - IEEE International Conference on Multimedia and Expo",
publisher = "IEEE",
pages = "1883--1888",
booktitle = "Proceedings - 2023 IEEE International Conference on Multimedia and Expo, ICME 2023",
address = "United States",
}