@inproceedings{ubo_mods_00154820,
  author    = {Ngo, Thao Phuong and Kunkel, Johannes and Ziegler, Jürgen},
  title     = {Exploring Mental Models for Transparent and Controllable Recommender Systems: A Qualitative Study},
  booktitle = {UMAP 2020 - Proceedings of the 28th ACM Conference on User Modeling, Adaptation and Personalization},
  year      = {2020},
  publisher = {Association for Computing Machinery (ACM)},
  address   = {New York},
  pages     = {183--191},
  keywords  = {transparent AI},
  isbn      = {9781450368612},
  doi       = {10.1145/3340631.3394841},
  abstract  = {While online content is personalized to an increasing degree, e.g., using recommender systems (RS), the rationale behind personalization and how users can adjust it typically remain opaque. This opacity has often been observed to have negative effects on the user experience and the perceived quality of RS. As a result, research has increasingly taken user-centric aspects such as transparency and control into account when assessing the quality of an RS. However, we argue that too little of this research has investigated the users’ perception and understanding of RS in their entirety. In this paper, we explore the users’ mental models of RS. More specifically, we followed the qualitative grounded theory methodology and conducted 10 semi-structured face-to-face interviews with typical and regular Netflix users. During the interviews, participants expressed high levels of uncertainty and confusion about the RS in Netflix. Consequently, we found a broad range of different mental models. Nevertheless, we also identified a general structure underlying all of these models, consisting of four steps: data acquisition, inference of user profile, comparison of user profiles or items, and generation of recommendations. Based on our findings, we discuss implications for designing more transparent, controllable, and user-friendly RS in the future.}
}

@inproceedings{ubo_mods_00136811,
  author    = {Kunkel, Johannes and Donkers, Tim and Michael, Lisa and Barbu, Catalin-Mihai and Ziegler, Jürgen},
  title     = {Let Me Explain: Impact of Personal and Impersonal Explanations on Trust in Recommender Systems},
  booktitle = {Proceedings of the 2019 CHI Conference on Human Factors in Computing Systems (CHI ’19)},
  year      = {2019},
  publisher = {ACM},
  address   = {New York},
  pages     = {487:1--487:12},
  isbn      = {978-1-4503-5970-2},
  doi       = {10.1145/3290605.3300717},
  url       = {https://doi.org/10.1145/3290605.3300717},
  abstract  = {Trust in a Recommender System (RS) is crucial for its overall success. However, it remains underexplored whether users trust personal recommendation sources (i.e., other humans) more than impersonal sources (i.e., conventional RS) and, if they do, whether the perceived quality of the explanations provided accounts for the difference. We conducted an empirical study in which we compared these two sources of recommendations and explanations. Human advisors were asked to explain the movies they recommended in short texts, while the RS created explanations based on item similarity. Our experiment comprised two rounds of recommending. Over both rounds, the quality of explanations provided by the human advisors was rated higher than the quality of the system’s explanations. Moreover, explanation quality significantly influenced perceived recommendation quality as well as trust in the recommendation source.
               Consequently, we suggest that RS should provide richer explanations in order to increase their perceived recommendation quality and trustworthiness.}
}

@inproceedings{ubo_mods_00106122,
  author    = {Kunkel, Johannes and Donkers, Tim and Barbu, Catalin-Mihai and Ziegler, Jürgen},
  title     = {Trust-Related Effects of Expertise and Similarity Cues in Human-Generated Recommendations},
  booktitle = {2nd Workshop on Theory-Informed User Modeling for Tailoring and Personalizing Interfaces (HUMANIZE)},
  year      = {2018},
  keywords  = {Structural Equation Modeling},
  url       = {http://ceur-ws.org/Vol-2068/humanize5.pdf},
  abstract  = {A user’s trust in recommendations plays a central role in the acceptance or rejection of a recommendation. One factor that influences trust is the source of the recommendations. In this paper, we describe an empirical study that investigates the trust-related influence of social presence arising in two scenarios: human-generated recommendations and automated recommending. We further compare visual cues indicating the expertise of a human recommendation source and its similarity to the target user, and evaluate their influence on trust. Our analysis indicates that even subtle visual cues can signal expertise and similarity effectively, thus influencing a user’s trust in recommendations. These findings suggest that automated recommender systems could benefit from the inclusion of social components, especially when conveying characteristics of the recommendation source. Thus, more informative and persuasive recommendation interfaces may be designed using such a mixed approach.}
}