@inproceedings{abadi2013decoding,
title = "Decoding affect in videos employing the MEG brain signal",
abstract = "This paper presents the characterization of affect (valence and arousal) using the Magnetoencephalogram (MEG) brain signal. We attempt single-trial classification of movie and music video clips using MEG responses recorded from seven participants. The main findings of this study are that: (i) the MEG signal effectively encodes affective viewer responses, (ii) clip arousal is predicted better than valence from MEG, and (iii) prediction performance is better for movie clips than for music videos.",
author = "Abadi, {Mojtaba Khomami} and Mostafa Kia and Ramanathan Subramanian and Paolo Avesani and Nicu Sebe",
year = "2013",
doi = "10.1109/FG.2013.6553809",
language = "English",
isbn = "9781467355452",
publisher = "IEEE",
pages = "1--6",
editor = "Rama Chellappa and Xilin Chen and Qiang Ji and Maja Pantic and Stan Sclaroff and Lijun Yin",
booktitle = "2013 10th IEEE International Conference and Workshops on Automatic Face and Gesture Recognition, FG 2013",
address = "United States",
note = "Conference date: 22-04-2013 through 26-04-2013",
}