@inproceedings{loepp2023how, author = {Loepp, Benedikt and Ziegler, Jürgen}, booktitle = {RecSys ’23: Proceedings of the 17th ACM Conference on Recommender Systems}, title = {How Users Ride the Carousel: Exploring the Design of Multi-List Recommender Interfaces From a User Perspective}, year = {2023}, address = {New York, NY, USA}, publisher = {ACM}, isbn = {9798400702419}, url = {https://doi.org/10.1145/3604915.3610638}, doi = {10.1145/3604915.3610638}, abstract = {Multi-list interfaces are widely used in recommender systems, especially in industry, showing collections of recommendations, one below the other, with items that have certain commonalities. The composition and order of these "carousels" are usually optimized by simulating user interaction based on probabilistic models learned from item click data. Research that actually involves users is rare, with only few studies investigating general user experience in comparison to conventional recommendation lists. Hence, it is largely unknown how specific design aspects such as carousel type and length influence the individual perception and usage of carousel-based interfaces. This paper seeks to fill this gap through an exploratory user study. The results confirm previous assumptions about user behavior and provide first insights into the differences in decision making in the presence of multiple recommendation carousels.} } @inproceedings{ubo_mods_00191514, author = {Loepp, Benedikt}, title = {Recommender Systems Alone Are Not Everything: Towards a Broader Perspective in the Evaluation of Recommender Systems}, booktitle = {PERSPECTIVES ’22: Proceedings of the 2nd Workshop on Perspectives on the Evaluation of Recommender Systems}, year = {2022}, abstract = {Thus far, in most of the user experiments conducted in the area of recommender systems, the respective system is considered as an isolated component, i.e., participants can only interact with the recommender that is under investigation. 
This fails to recognize the situation of users in real-world settings, where the recommender usually represents only one part of a greater system, with many other options for users to find suitable items than using the mechanisms that are part of the recommender, e.g., liking, rating, or critiquing. For example, in current web applications, users can often choose from a wide range of decision aids, from text-based search over faceted filtering to intelligent conversational agents. This variety of methods, which may equally support users in their decision making, raises the question of whether the current practice in recommender evaluation is sufficient to fully capture the user experience. In this position paper, we discuss the need to take a broader perspective in future evaluations of recommender systems, and raise awareness for evaluation methods which we think may help to achieve this goal, but have not yet gained the attention they deserve.}, url = {http://ceur-ws.org/Vol-3228/paper5.pdf} } @inproceedings{ubo_mods_00167803, author = {Ma, Yuan and Kleemann, Timm and Ziegler, Jürgen}, title = {Mixed-Modality Interaction in Conversational Recommender Systems}, booktitle = {Proceedings of the 8th Joint Workshop on Interfaces and Human Decision Making for Recommender Systems}, series = {CEUR Workshop Proceedings}, year = {2021}, volume = {2948}, pages = {21--37}, keywords = {Conversational Recommender Systems; User Interface; Preference Elicitation; Critique-based Recommendations}, abstract = {Recent advances in natural language processing have made modern chatbots and Conversational Recommender Systems (CRS) increasingly intelligent, enabling them to handle more complex user inputs. Still, the interaction with a CRS is often tedious and error-prone. Especially when using written text as the form of conversation, the interaction is often less efficient in comparison to conventional GUI-style interaction.
To keep the flexibility and mixed-initiative style of language-based conversation while leveraging the efficiency and simplicity of interacting through graphical widgets, we investigate the design space of integrating GUI elements into text-based conversations. While simple response buttons have already been used in chatbots, the full range of such mixed-modality interactions has not yet been investigated in existing research. We propose two design dimensions along which integrations can be defined and analyze their applicability for preference elicitation and for critiquing the CRS’s responses at different levels. We report a user study in which we investigated user preferences and perceived usability of different techniques based on video prototypes.}, note = {OA platinum}, issn = {1613-0073}, url = {http://ceur-ws.org/Vol-2948/paper2.pdf}, language = {en} } @inproceedings{ubo_mods_00167689, author = {Kleemann, Timm and Wagner, Magdalena and Loepp, Benedikt and Ziegler, Jürgen}, title = {Modeling User Interaction at the Convergence of Filtering Mechanisms, Recommender Algorithms and Advisory Components}, booktitle = {Mensch und Computer 2021 – Tagungsband}, year = {2021}, publisher = {ACM}, address = {New York, NY, USA}, pages = {531--543}, keywords = {Human factors; User experience; User modeling; Search interfaces; Recommender systems}, isbn = {978-1-4503-8645-6}, doi = {10.1145/3473856.3473859}, url = {https://dl.acm.org/doi/10.1145/3473856.3473859?cid=87958660357}, language = {en}, abstract = {A variety of methods is used nowadays to reduce the complexity of product search on e-commerce platforms, allowing users, for example, to specify exactly the features a product should have, but also, just to follow the recommendations automatically generated by the system. While such decision aids are popular with system providers, research to date has mostly focused on individual methods rather than their combination.
To close this gap, we propose to support users in choosing the right method for the current situation. As a first step, we report in this paper a user study with a fictitious online shop in which users were able to flexibly use filter mechanisms, rely on recommendations, or follow the guidance of a dialog-based product advisor. We show that from the analysis of the interaction behavior, a model can be derived that allows predicting which of these decision aids is most useful depending on the user’s situation, and how this is affected by demographics and personality.} } @inproceedings{ubo_mods_00167688, author = {Loepp, Benedikt}, title = {On the Convergence of Intelligent Decision Aids}, booktitle = {Mensch Und Computer 2021 – Workshopband}, year = {2021}, publisher = {Gesellschaft für Informatik e.V.}, address = {Bonn}, keywords = {Decision support; Human factors; Information filtering; Adaptive systems; Recommender systems; User experience; User modeling}, abstract = {On the one hand, users’ decision making in today’s web is supported in numerous ways, with mechanisms ranging from manual search over automated recommendation to intelligent advisors. The focus on algorithmic accuracy, however, is questioned more and more. On the other hand, although the boundaries between the mechanisms are blurred increasingly, research on user-related aspects is still conducted separately in each area. In this position paper, we present a research agenda for providing a more holistic solution, in which users are supported with the right decision aid at the right time depending on personal characteristics and situational needs.}, doi = {10.18420/muc2021-mci-ws02-371}, url = {https://doi.org/10.18420/muc2021-mci-ws02-371}, language = {en} } @inproceedings{ubo_mods_00167074, author = {Hernandez-Bocanegra, Diana C. 
and Ziegler, Jürgen}, title = {Conversational Review-based Explanations for Recommender Systems: Exploring Users’ Query Behavior}, booktitle = {CUI 2021 - 3rd Conference on Conversational User Interfaces}, series = {ACM International Conference Proceeding Series}, year = {2021}, publisher = {Association for Computing Machinery (ACM)}, address = {New York}, keywords = {argumentation; conversational agent; explanations; Recommender systems; user study}, abstract = {Providing explanations based on user reviews in recommender systems (RS) can increase users’ perception of system transparency. While static explanations are dominant, interactive explanatory approaches have emerged in explainable artificial intelligence (XAI), so that users are more likely to examine system decisions and get more arguments supporting system assertions. However, little attention has been paid to conversational approaches for explanations targeting end users. In this paper we explore how to design a conversational interface to provide explanations in a review-based RS, and present the results of a Wizard of Oz (WoOz) study that provided insights into the type of questions users might ask in such a context, as well as their perception of a system simulating such a dialog. Consequently, we propose a dialog management policy and user intents for explainable review-based RS, taking as an example the hotels domain.}, isbn = {9781450389983}, doi = {10.1145/3469595.3469596}, url = {https://dl.acm.org/doi/10.1145/3469595.3469596?cid=99659550942}, language = {en} } @inproceedings{ubo_mods_00166661, author = {Hernandez Bocanegra, Diana Carolina and Ziegler, Jürgen}, editor = {Hansen, C. and Nürnberger, A. 
and Preim, B.}, title = {Argumentative explanations for recommendations - Effect of display style and profile transparency}, booktitle = {Mensch und Computer 2020}, year = {2020}, keywords = {Recommender systems, explanations, user study}, abstract = {Providing explanations based on user reviews in recommender systems may increase users’ perception of transparency. However, little is known about how these explanations should be presented to users in order to increase both their understanding and acceptance. We present in this paper a user study to investigate the effect of different display styles (visual and text only) on the perception of review-based explanations for recommended hotels. Additionally, we also aim to test the differences in users’ perception when providing information about their own profiles, in addition to a summarized view on the opinions of other users about the recommended hotel. Our results suggest that the perception of explanations regarding these aspects may vary depending on user characteristics, such as decision-making styles or social awareness.}, doi = {10.18420/muc2020-ws111-338}, url = {https://doi.org/10.18420/muc2020-ws111-338}, language = {en} } @inproceedings{ubo_mods_00154785, author = {Naveed, Sidra and Loepp, Benedikt and Ziegler, Jürgen}, title = {On the Use of Feature-based Collaborative Explanations: An Empirical Comparison of Explanation Styles}, booktitle = {ExUM ’20: Proceedings of the International Workshop on Transparent Personalization Methods based on Heterogeneous Personal Data}, year = {2020}, publisher = {ACM}, address = {New York}, pages = {226–232}, keywords = {User Experience}, doi = {10.1145/3386392.3399303}, url = {https://dl.acm.org/doi/10.1145/3386392.3399303?cid=87958660357}, abstract = {Current attempts to explain recommendations mostly exploit a single type of data, i.e. usually either ratings provided by users for items in collaborative filtering systems, or item features in content-based systems. 
While this might be sufficient in straightforward recommendation scenarios, the complexity of other situations could require the use of multiple datasources, for instance, depending on the product domain. Even though hybrid systems have a long and successful history in recommender research, the connections between user ratings and item features have only rarely been used for offering more informative and transparent explanations. In previous work, we presented a prototype system based on a feature-weighting mechanism that constitutes an exception, allowing to recommend both items and features based on ratings while offering advanced explanations based on content data. In this paper, we empirically evaluate this prototype in terms of user-oriented aspects and user experience against to widely accepted baselines. Two user studies show that our novel approach outperforms conventional collaborative filtering, while a pure content-based system was perceived in a similarly positive light. Overall, the results draw a promising picture, which becomes particularly apparent from a user perspective when participants were specifically asked to use the explanations: they indicated in their qualitative feedback that they understood them and highly appreciated their availability.} } @inproceedings{ubo_mods_00154786, author = {Hernandez-Bocanegra, Diana C. and Donkers, Tim and Ziegler, Jürgen}, title = {Effects of Argumentative Explanation Types on the Perception of Review-Based Recommendations}, booktitle = {Adjunct Proceedings of the 28th ACM Conference on User Modeling, Adaptation and Personalization (UMAP ’20 Adjunct)}, year = {2020}, publisher = {Association for Computing Machinery (ACM)}, address = {New York}, pages = {219–225}, keywords = {user study}, abstract = {Recommender systems have achieved considerable maturity and accuracy in recent years. However, the rationale behind recommendations mostly remains opaque. 
Providing textual explanations based on user reviews may increase users’ perception of transparency and, by that, overall system satisfaction. However, little is known about how these explanations can be effectively and efficiently presented to the user. In the following paper, we present an empirical study conducted in the domain of hotels to investigate the effect of different textual explanation types on, among others, perceived system transparency and trustworthiness, as well as the overall assessment of explanation quality. The explanations presented to participants follow an argument-based design, which we propose to provide a rationale to support a recommendation in a structured way. Our results show that people prefer explanations that include an aggregation using percentages of other users’ opinions, over explanations that only include a brief summary of opinions. The results additionally indicate that user characteristics such as social awareness may influence the perception of explanation quality.}, isbn = {9781450367110}, doi = {10.1145/3386392.3399302}, url = {https://dl.acm.org/doi/10.1145/3386392.3399302?cid=99659550942} } @inproceedings{ubo_mods_00144402, author = {Loepp, Benedikt and Ziegler, Jürgen}, title = {Measuring the Impact of Recommender Systems – A Position Paper on Item Consumption in User Studies}, booktitle = {Proceedings of the 1st Workshop on Impact of Recommender Systems (ImpactRS ’19)}, year = {2019}, keywords = {User Studies}, url = {https://impactrs19.github.io/papers/short4.pdf}, abstract = {While participants of recommender systems user studies usually cannot experience recommended items, it is common practice that researchers ask them to fill in questionnaires regarding the quality of systems and recommendations. 
While this has been shown to work well under certain circumstances, it sometimes seems not possible to assess user experience without enabling users to consume items, raising the question of whether the impact of recommender systems has always been measured adequately in past user studies. In this position paper, we aim at exploring this question by means of a literature review and at identifying aspects that need to be further investigated in terms of their influence on assessments in users studies, for instance, the difference between consumption of products or only of related information as well as the effect of domain, domain knowledge and other possibly confounding factors.} } @inproceedings{ubo_mods_00140449, author = {Torkamaan, Helma and Barbu, Catalin-Mihai and Ziegler, Jürgen}, editor = {Bogers, Toine and Said, Alan}, title = {How Can They Know That? A Study of Factors Affecting the Creepiness of Recommendations}, booktitle = {Proceedings of the 13th ACM Conference on Recommender Systems}, year = {2019}, publisher = {ACM}, address = {New York, NY}, pages = {423–427}, keywords = {Trust}, isbn = {978-1-4503-6243-6}, doi = {10.1145/3298689.3346982}, abstract = {Recommender systems (RS) often use implicit user preferences extracted from behavioral and contextual data, in addition to traditional rating-based preference elicitation, to increase the quality and accuracy of personalized recommendations. However, these approaches may harm user experience by causing mixed emotions, such as fear, anxiety, surprise, discomfort, or creepiness. RS should consider users’ feelings, expectations, and reactions that result from being shown personalized recommendations. This paper investigates the creepiness of recommendations using an online experiment in three domains: movies, hotels, and health. We define the feeling of creepiness caused by recommendations and find out that it is already known to users of RS. 
We further find out that the perception of creepiness varies across domains and depends on recommendation features, like causal ambiguity and accuracy. By uncovering possible consequences of creepy recommendations, we also learn that creepiness can have a negative influence on brand and platform attitudes, purchase or consumption intention, user experience, and users’ expectations of—and their trust in—RS.} } @inproceedings{ubo_mods_00139552, author = {Kleemann, Timm and Ziegler, Jürgen}, title = {Integration of Dialog-based Product Advisors into Filter Systems}, booktitle = {Proceedings of the Conference on Mensch und Computer}, series = {ACM International Conference Proceeding Series}, year = {2019}, publisher = {ACM Press}, address = {New York}, pages = {67–77}, keywords = {Dialogbasierte Produktberater, Filtersysteme}, isbn = {978-1-4503-7198-8}, doi = {10.1145/3340764.3340786}, abstract = { Different techniques such as search functions or recommendation components are used today to support the often complex product search on the Internet. Faceted filter systems that successively limit the result set according to the set filter settings have proven to be quite successful. However, this method requires clear objectives and domain knowledge on the part of the users. As an alternative, conversational product advisors who select suitable products on the basis of a sequence of questions have gained more importance in recent times, whereby the questions are based more on the tasks and application scenarios of the users than on the technical properties of the products. However, there is currently a lack of approaches that integrate filter systems and conversational advisors in a meaningful and closely coupled way. 
In this paper an integrated approach is presented, where users can switch between filter systems and advisory dialogues, whereby selection actions in one component have a consistent and transparent effect on the other component and can be further adjusted there. The aim is to better support users with different levels of knowledge of the product type concerned. We describe the requirements for such integrated systems resulting from our approach and report on a user study in which the user behavior and the subjective evaluation were examined in a prototypical implementation.} } @inproceedings{ubo_mods_00136811, author = {Kunkel, Johannes and Donkers, Tim and Michael, Lisa and Barbu, Catalin-Mihai and Ziegler, Jürgen}, title = {Let Me Explain: Impact of Personal and Impersonal Explanations on Trust in Recommender Systems}, booktitle = {Proceedings of the 37th International Conference on Human Factors in Computing Systems (CHI ’19)}, year = {2019}, publisher = {ACM}, address = {New York}, pages = {487:1–487:12}, isbn = {978-1-4503-5970-2}, doi = {10.1145/3290605.3300717}, url = {https://doi.org/10.1145/3290605.3300717}, abstract = {Trust in a Recommender System (RS) is crucial for its overall success. However, it remains underexplored whether users trust personal recommendation sources (i.e. other humans) more than impersonal sources (i.e. conventional RS), and, if they do, whether the perceived quality of explanation provided account for the difference. We conducted an empirical study in which we compared these two sources of recommendations and explanations. Human advisors were asked to explain movies they recommended in short texts while the RS created explanations based on item similarity. Our experiment comprised two rounds of recommending. Over both rounds the quality of explanations provided by users was assessed higher than the quality of the system’s explanations. 
Moreover, explanation quality significantly influenced perceived recommendation quality as well as trust in the recommendation source. Consequently, we suggest that RS should provide richer explanations in order to increase their perceived recommendation quality and trustworthiness.} } @inproceedings{ubo_mods_00132857, author = {Barbu, Catalin-Mihai and Carbonell, Guillermo and Ziegler, Jürgen}, title = {The Influence of Trust Cues on the Trustworthiness of Online Reviews for Recommendations}, booktitle = {Proceedings of the 34th ACM/SIGAPP Symposium on Applied Computing}, year = {2019}, publisher = {ACM Press}, address = {New York}, pages = {1687–1689}, keywords = {User study}, isbn = {978-1-4503-5933-7}, doi = {10.1145/3297280.3297603}, abstract = {In recent years, recommender systems have started to exploit user-generated content, in particular online reviews, as an additional means of personalizing and explaining their predictions. However, reviews that are poorly written or perceived as fake may have a detrimental effect on the users’ trust in the recommendations. Embedding so-called "trust cues" in the user interface is a technique that can help users judge the trustworthiness of presented information. We report preliminary results from an online user study that investigated the impact of trust cues—in the form of helpfulness votes—on the trustworthiness of online reviews for recommendations.} } @article{ubo_mods_00127145, author = {Carbonell, Guillermo and Barbu, Catalin-Mihai and Vorgerd, Laura and Brand, Matthias}, title = {The impact of emotionality and trust cues on the perceived trustworthiness of online reviews}, journal = {Cogent Business and Management}, year = {2019}, volume = {6}, number = {1}, pages = {1586062}, keywords = {trust cues}, abstract = {Online reviews and trust cues are two core aspects of e-commerce. Based on these features, users can make informed decisions about the products and services they buy online. 
Although prior studies have investigated various review characteristics, the writing style has been examined less frequently. This empirical study simulated an e-commerce platform, in which participants (N = 124) were confronted with the reviews and helpfulness votes of other users while searching for one certain product (i.e. a laptop). The task was to rate how trustworthy or fake the reviews are, and the purchase intention after reading each review. Our results show that a factual writing style is considered more trustworthy, less fake, and entails a higher purchase intention when compared to emotional reviews. The trust cues were only relevant in interaction with variables that measure trust in the Internet as a safe environment for making monetary transactions. Furthermore, we found that trustworthiness influenced purchase intention, but the fakeness perception of the review does not yield such effects. We suggest future studies to understand this result and highlight implications for platform design.}, issn = {2331-1975}, doi = {10.1080/23311975.2019.1586062} } @inproceedings{ubo_mods_00117943, author = {Kizina, Anna and Kunkel, Johannes and Ziegler, Jürgen}, title = {Ein kollaboratives Task-Management-System mit spielerischen Elementen}, booktitle = {Mensch und Computer 2018: Workshopband}, year = {2018}, publisher = {Gesellschaft für Informatik e.V.}, address = {Bonn}, keywords = {Kollaboration}, issn = {2510-2672}, doi = {10.18420/muc2018-ws03-0477} } @inproceedings{ubo_mods_00116566, author = {Loepp, Benedikt and Donkers, Tim and Kleemann, Timm and Ziegler, Jürgen}, title = {Impact of Item Consumption on Assessment of Recommendations in User Studies}, booktitle = {Proceedings of the 12th ACM Conference on Recommender Systems (RecSys ’18)}, year = {2018}, publisher = {ACM}, address = {New York, NY, USA}, pages = {49--53}, keywords = {User Studies}, isbn = {978-1-4503-5901-6}, doi = {10.1145/3240323.3240375}, url =
{https://dl.acm.org/doi/10.1145/3240323.3240375?cid=87958660357}, abstract = {In user studies of recommender systems, participants typically cannot consume the recommended items. Still, they are asked to assess recommendation quality and other aspects related to user experience by means of questionnaires. Without having listened to recommended songs or watched suggested movies, however, this might be an error-prone task, possibly limiting validity of results obtained in these studies. In this paper, we investigate the effect of actually consuming the recommended items. We present two user studies conducted in different domains showing that in some cases, differences in the assessment of recommendations and in questionnaire results occur. Apparently, it is not always possible to adequately measure user experience without allowing users to consume items. On the other hand, depending on domain and provided information, participants sometimes seem to approximate the actual value of recommendations reasonably well.} } @inproceedings{ubo_mods_00090298, author = {Barbu, Catalin-Mihai and Ziegler, Jürgen}, editor = {Neidhardt, Julia and Fesenmaier, Daniel and Kuflik, Tsvi and Wörndl, Wolfgang}, chapter = {}, title = {Co-Staying: a Social Network for Increasing the Trustworthiness of Hotel Recommendations}, series = {CEUR workshop proceedings}, year = {2017}, volume = {1906}, pages = {35–39}, keywords = {Trustworthiness}, abstract = {Recommender systems attempt to match users’ preferences with items. To achieve this, they typically store and process a large amount of user profiles, item attributes, as well as an ever-increasing volume of user-generated feedback about those items. By mining user-generated data, such as reviews, a complex network consisting of users, items, and item properties can be created. 
Exploiting this network could allow a recommender system to identify, with greater accuracy, items that users are likely to find attractive based on the attributes mentioned in their past reviews as well as in those left by similar users. At the same time, allowing users to visualize and explore the network could lead to novel ways of interacting with recommender systems and might play a role in increasing the trustworthiness of recommendations. We report on a conceptual model for a multimode network for hotel recommendations and discuss potential interactive mechanisms that might be employed for visualizing it.}, url = {http://ceur-ws.org/Vol-1906/paper6.pdf}, booktitle = {RecTour 2017: 2nd Workshop on Recommenders in Tourism : Proceedings of the 2nd Workshop on Recommenders in Tourism co-located with 11th ACM Conference on Recommender Systems (RecSys 2017) Como, Italy, August 27, 2017} } @inproceedings{ubo_mods_00090297, author = {Barbu, Catalin-Mihai and Ziegler, Jürgen}, editor = {Domonkos, Tikk and Pu, Pearl}, chapter = {}, title = {Users’ Choices About Hotel Booking: Cues for Personalizing the Presentation of Recommendations}, series = {CEUR workshop proceedings}, year = {2017}, volume = {1905}, pages = {44–45}, keywords = {Tourism}, abstract = {Personalization in recommender systems has typically been applied to the underlying algorithms. In contrast, the presentation of individual recommendations—specifically, the various ways in which it can be adapted to suit the user’s needs in a more effective manner—has received relatively little attention by comparison. 
We present the results of an exploratory survey about users’ choices regarding hotel recommendations and draw preliminary conclusions about whether these choices can influence the presentation of recommendations.}, url = {http://ceur-ws.org/Vol-1905/recsys2017_poster22.pdf}, booktitle = {Poster Proceeding of ACM Recsys 2017: Proceedings of the Poster Track of the 11th ACM Conference on Recommender Systems (RecSys 2017) Como, Italy, August 28, 2017} } @inproceedings{ubo:26020, author = {Lohmann, Steffen and Tomanek, Katrin and Ziegler, Jürgen and Hahn, Udo}, editor = {Ohlsson, Stellan and Catrambone, Richard}, chapter = {}, title = {Getting at the Cognitive Complexity of Linguistic Metadata Annotation: A Pilot Study Using Eye-Tracking}, year = {2010}, publisher = {Cognitive Science Society}, address = {Austin, TX}, abstract = {We report on an experiment where the decision behavior of annotators issuing linguistic metadata is observed with an eye-tracking device. As experimental conditions we consider the role of textual context and linguistic complexity classes. Still preliminary in nature, our data suggests that semantic complexity is much harder to deal with than syntactic one, and that full-scale textual context is negligible for annotation, with the exception of semantic high-complexity cases. We claim that such observational data might lay the foundation for empirically grounded annotation cost models and the design of cognitively adequate annotation user interfaces.}, url = {http://palm.mindmodeling.org/cogsci2010/papers/0508/paper0508.pdf}, booktitle = {Proceedings of the 32nd Annual Meeting of the Cognitive Science Society (CogSci 2010)} } @inproceedings{ubo:26371, author = {Nacke, E. Lennart and Schild, Jonas and Niesenhaus, Jörg}, editor = {Calvi, Licia and Gualeni, Stefano and Nuijten, Koos and Nacke, E. 
Lennart and Poels, Karolien}, title = {Gameplay experience testing with playability and usability surveys – An experimental pilot study}, year = {2010}, publisher = {NHTV Expertise Series}, address = {Breda}, url = {http://www.acagamic.com/uploads/2007/09/Playability-submission.final_.submission.pdf}, abstract = {This pilot study investigates an experimental methodology for gathering data to create correlations between experiential factors measured by a gameplay experience questionnaire and player quality measures, such as playing frequency, choice of game, and playing time. The characteristics of two distinct games were examined concerning the aspects of game experience, subjective game quality, and game usability. Interactions within the three aspects were identified. The results suggest that gameplay experience dimensions flow and immersion are similarly motivating in different game genres, which however might not be equally enjoyable. On the one hand, usability ratings may be positively influenced when a game provides immersion and flow or on the other hand, flow and immersion may be negatively influenced by poor usability ratings. These results emphasize the need for an approach to classify games based on correlation patterns involving game experience, quality, and usability.}, booktitle = {Playability and player experience: Proceedings of the Fun and Games 2010 Workshop} } @inproceedings{ubo:26019, author = {Tomanek, Katrin and Hahn, Udo and Lohmann, Steffen and Ziegler, Jürgen}, editor = {{Association for Computational Linguistics}}, title = {A Cognitive Cost Model of Annotations Based on Eye-Tracking Data}, year = {2010}, publisher = {ACL}, address = {Uppsala}, abstract = {We report on an experiment where the decision behavior of annotators issuing linguistic metadata is observed with an eyetracking device. As experimental conditions we consider the role of textual context and linguistic complexity classes.
Still preliminary in nature, our data suggests that semantic complexity is much harder to deal with than syntactic one, and that full-scale textual context is negligible for annotation, with the exception of semantic high-complexity cases. We claim that such observational data might lay the foundation for empirically grounded annotation cost models and the design of cognitively adequate annotation user interfaces.}, isbn = {978-1-932432-66-4}, url = {http://www.aclweb.org/anthology-new/P/P10/P10-1118.pdf}, booktitle = {Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics (ACL 2010)} } @inproceedings{ubo:24670, author = {Nacke, Lennart E. and Drachen, Anders and Kuikkaniemi, Kai and Niesenhaus, Jörg and Korhonen, Hannu J. and van den Hoogen, Wouter and IJsselsteijn, Wijnand and de Kort, Yvonne}, editor = {{DiGRA}}, title = {Playability and Player Experience Research}, year = {2009}, address = {London, UK}, abstract = {As the game industry matures and games become more and more complex, there is an increasing need to develop scientific methodologies for analyzing and measuring player experience, in order to develop a better understanding of the relationship and interactions between players and games.
This panel gathers distinguished European playability and user experience experts to discuss current findings and methodological advancements within player experience and playability research.}, url = {http://www.bth.se/fou/forskinfo.nsf/17e96a0dab8ab6a1c1257457004d59ab/e0a8cdd8cfc0c7e6c125762c005557c0/$file/Nacke-etal-Panel%20Playability%20and%20Player%20Experience.pdf}, booktitle = {Proceedings of DiGRA 2009: Breaking New Ground: Innovation in Games, Play, Practice and Theory.} } @inproceedings{ubo:18188, author = {El Jerroudi, Zoulfa and Ziegler, Jürgen and Meissner, Stephan and Philipsenburg, Axel}, editor = {Stary, C.}, chapter = {}, title = {E-Quest: Ein Online-Befragungswerkzeug für Web Usability}, year = {2005}, publisher = {Oldenbourg Verlag}, address = {München}, abstract = {E-Quest ist ein Werkzeug zur automatisierten Online-Befragungen. Es bietet ohne großen Konfigurationsaufwand die Möglichkeit zur komfortablen Gestaltung der Fragebögen und vielfältigen Auswertungsmöglichkeiten, um die Usability einer Webseite zu evaluieren.}, booktitle = {Mensch & Computer 2005: Kunst und Wissenschaft - Grenzüberschreitungen der interaktiven ART} }