% Cleaned with one field per line; page ranges use "--"; non-ASCII normalized
% to BibTeX special characters ({\"u}) and ASCII apostrophes so classic BibTeX
% sorts and labels correctly; acronyms braced against style recasing.
% url dropped where it only duplicated the DOI resolver (DOI preferred).
@inproceedings{ubo_mods_00136811,
  author    = {Kunkel, Johannes and Donkers, Tim and Michael, Lisa and Barbu, Catalin-Mihai and Ziegler, J{\"u}rgen},
  title     = {Let Me Explain: Impact of Personal and Impersonal Explanations on Trust in Recommender Systems},
  booktitle = {Proceedings of the 37th International Conference on Human Factors in Computing Systems ({CHI} '19)},
  year      = {2019},
  publisher = {ACM},
  address   = {New York, NY, USA},
  pages     = {487:1--487:12},
  isbn      = {978-1-4503-5970-2},
  doi       = {10.1145/3290605.3300717},
  abstract  = {Trust in a Recommender System (RS) is crucial for its overall success. However, it remains underexplored whether users trust personal recommendation sources (i.e. other humans) more than impersonal sources (i.e. conventional RS), and, if they do, whether the perceived quality of explanation provided account for the difference. We conducted an empirical study in which we compared these two sources of recommendations and explanations. Human advisors were asked to explain movies they recommended in short texts while the RS created explanations based on item similarity. Our experiment comprised two rounds of recommending. Over both rounds the quality of explanations provided by users was assessed higher than the quality of the system's explanations. Moreover, explanation quality significantly influenced perceived recommendation quality as well as trust in the recommendation source. Consequently, we suggest that RS should provide richer explanations in order to increase their perceived recommendation quality and trustworthiness.},
}

% url kept here: the ACM ?cid= link is an author open-access link,
% distinct from the plain DOI resolver.
@inproceedings{ubo_mods_00116566,
  author    = {Loepp, Benedikt and Donkers, Tim and Kleemann, Timm and Ziegler, J{\"u}rgen},
  title     = {Impact of Item Consumption on Assessment of Recommendations in User Studies},
  booktitle = {Proceedings of the 12th {ACM} Conference on Recommender Systems ({RecSys} '18)},
  year      = {2018},
  publisher = {ACM},
  address   = {New York, NY, USA},
  pages     = {49--53},
  keywords  = {User Studies},
  isbn      = {978-1-4503-5901-6},
  doi       = {10.1145/3240323.3240375},
  url       = {https://dl.acm.org/doi/10.1145/3240323.3240375?cid=87958660357},
  abstract  = {In user studies of recommender systems, participants typically cannot consume the recommended items. Still, they are asked to assess recommendation quality and other aspects related to user experience by means of questionnaires. Without having listened to recommended songs or watched suggested movies, however, this might be an error-prone task, possibly limiting validity of results obtained in these studies. In this paper, we investigate the effect of actually consuming the recommended items. We present two user studies conducted in different domains showing that in some cases, differences in the assessment of recommendations and in questionnaire results occur. Apparently, it is not always possible to adequately measure user experience without allowing users to consume items. On the other hand, depending on domain and provided information, participants sometimes seem to approximate the actual value of recommendations reasonably well.},
}

% CEUR workshop paper: no publisher/pages in the source record, so only the
% CEUR-WS url identifies it; url retained (no DOI available in the record).
@inproceedings{ubo_mods_00106122,
  author    = {Kunkel, Johannes and Donkers, Tim and Barbu, Catalin-Mihai and Ziegler, J{\"u}rgen},
  title     = {Trust-Related Effects of Expertise and Similarity Cues in Human-Generated Recommendations},
  booktitle = {2nd Workshop on Theory-Informed User Modeling for Tailoring and Personalizing Interfaces ({HUMANIZE})},
  year      = {2018},
  keywords  = {Structural Equation Modeling},
  url       = {http://ceur-ws.org/Vol-2068/humanize5.pdf},
  abstract  = {A user's trust in recommendations plays a central role in the acceptance or rejection of a recommendation. One factor that influences trust is the source of the recommendations. In this paper we describe an empirical study that investigates the trust-related influence of social presence arising in two scenarios: human-generated recommendations and automated recommending. We further compare visual cues indicating the expertise of a human recommendation source and its similarity with the target user, and evaluate their influence on trust. Our analysis indicates that even subtle visual cues can signal expertise and similarity effectively, thus influencing a user's trust in recommendations. These findings suggest that automated recommender systems could benefit from the inclusion of social components--especially when conveying characteristics of the recommendation source. Thus, more informative and persuasive recommendation interfaces may be designed using such a mixed approach.},
}