@inproceedings{a6d74de2d0c7404ba95531d97bbc0eff,
title = "Dyadic Interaction Detection from Pose and Flow",
abstract = "We propose a method for detecting dyadic interactions: fine-grained, coordinated interactions between two people. Our model is capable of recognizing interactions such as a hand shake or a high five, and locating them in time and space. At the core of our method is a pictorial structures model that additionally takes into account the fine-grained movements around the joints of interest during the interaction. Compared to a bag-of-words approach, our method not only detects the specific type of interaction more accurately, but also provides the specific location of the interaction. The model is trained with both video data and body joint estimates obtained from Kinect. During testing, only video data is required. To demonstrate the efficacy of our approach, we introduce the ShakeFive dataset, which consists of videos and Kinect data of hand shake and high five interactions. On this dataset, we obtain a mean average precision of 49.56%, outperforming a bag-of-words approach by 23.32%. We further demonstrate that the model can be learned from just a few interactions.",
keywords = "Interaction, Action recognition, Dyadic, Computer vision",
author = "{van Gemeren}, Coert and Tan, Robby and Poppe, Ronald and Veltkamp, Remco",
year = "2014",
doi = "10.1007/978-3-319-11839-0_9",
language = "English",
isbn = "978-3-319-11838-3",
series = "Lecture Notes in Computer Science",
publisher = "Springer",
pages = "101--115",
booktitle = "Proceedings of the International Workshop on Human Behavior Understanding (HBU)",
address = "Cham",
}