@inproceedings{ubo_mods_00199546,
  author = {Ma, Yuan and Donkers, Tim and Kleemann, Timm and Ziegler, Jürgen},
  editor = {Gwizdka, Jacek and Rieh, Soo Young},
  title = {An Instrument for Measuring Users’ Meta-Intents},
  booktitle = {CHIIR ’23: Proceedings of the 2023 Conference on Human Information Interaction and Retrieval},
  year = {2023},
  publisher = {ACM},
  address = {Washington},
  pages = {290--302},
  abstract = {We propose the concept of meta-intents which represent high-level user preferences related to the interaction and decision-making in conversational recommender systems (CRS) and present a questionnaire instrument for measuring meta-intents. We conducted a two-stage user study, an exploratory study with 212 participants on Prolific, and a confirmatory study with 394 participants on Prolific. We obtained a reliable and stable meta-intents questionnaire with 22 question items, corresponding to seven latent factors (concepts). These seven factors cover important interaction preferences and are closely related to users’ decision-making process. For example, the factor dialog-initiative reflects whether users prefer to follow the system’s guidance or ask their own questions in a CRS. We conducted statistical analyses of meta-intents in two domains (smartphones and hotels), and a general chatbot scenario. We also investigated the influence of additional factors (demography, decision-making style) on meta-intents through Structural Equation Modeling (SEM). Our results provide preliminary evidence that the proposed meta-intents are domain and demography (gender, age) independent. They can be linked to the general decision-making style and can thus be instrumental in translating general decision-making factors into more concrete design guidance for CRS and their potential personalization. Meta-intents also provide a basis for future analyses of interaction behavior in CRS and the development of a cognitively founded theoretical framework.},
  isbn = {979-8-4007-0035-4},
  doi = {10.1145/3576840.3578317},
  url = {https://doi.org/10.1145/3576840.3578317},
  language = {en}
}


@phdthesis{ubo_mods_00181111,
  author = {Hernandez Bocanegra, Diana Carolina},
  title = {Argumentative Explanations for Recommendations Based on Reviews},
  school = {University of Duisburg-Essen},
  address = {Duisburg, Essen},
  year = {2022},
  keywords = {Recommender systems, Explanations, Argumentation, Interactive interfaces design, Conversational agent, Dataset, Empirical studies},
  abstract = {Recommender systems (RS) assist users in making decisions on a wide range of tasks, while preventing them from being overwhelmed by enormous amounts of choices. RS prevalence is such that many users of information-based technologies interact with them on a daily basis. However, many of these systems are still perceived as black boxes by users, who often have no way of seeing or requesting the reasons why certain items are recommended, potentially leading to negative attitudes towards RS by users. Providing explanations in RS can bring several advantages for users’ decision making and overall user experience. Although different explanatory approaches have been proposed so far, the general lack of user evaluation, and validation of concepts and implementations of explainable methods in RS, have left open many questions, related to how such explanations should be structured and presented. Also, while explanations in RS have so far been presented mostly in a static and non-interactive manner, limited work in explainable artificial intelligence have emerged addressing interactive explanations, enabling users to examine in detail system decisions. However, little is known about how interactive interfaces in RS should be conceptualized and designed, so that explanatory aims such as transparency and trust are met. This dissertation investigates interactive, conversational explanations that enable users to freely explore explanatory content at will. Our work is grounded on RS explainable methods that exploit user reviews, and inspired by dialog models and formal argument structures. Following a user-centered approach, this dissertation proposes an interface design for explanations as interactive argumentation, which was empirically validated through different user studies. To this end, we implemented a RS able to provide explanations both through a graphical user interface (GUI) navigation and a natural language interface. 
The latter consists of a conversational agent for explainable RS, which supports conversation flows for different types of questions written by users in their own words. To this end, we formulated a model to facilitate the detection of the intent expressed by a user on a question, and collected and annotated a dataset helpful for intent detection, which can facilitate the development of explanatory dialog systems in RS. The results reported in this dissertation indicate that providing interactive explanations through a conversation, i.e. an exchange of questions and answers between the user and the system, using both GUI-navigation or natural language conversation, can positively impact users evaluation of explanation quality and of the system, in terms of explanatory aims like transparency, and trust.},
  doi = {10.17185/duepublico/75833},
  url = {https://doi.org/10.17185/duepublico/75833}
}


@inproceedings{ubo_mods_00168051,
  author = {Donkers, Tim and Ziegler, Jürgen},
  title = {The Dual Echo Chamber: Modeling Social Media Polarization for Interventional Recommending},
  booktitle = {Fifteenth ACM Conference on Recommender Systems},
  year = {2021},
  publisher = {Association for Computing Machinery, Inc},
  address = {New York},
  pages = {12--22},
  keywords = {Agent-based modeling, Knowledge graphs, Machine learning, Recommender systems},
  isbn = {9781450384582},
  doi = {10.1145/3460231.3474261},
  url = {https://doi.org/10.1145/3460231.3474261},
  language = {en}
}


@article{ubo_mods_00154868,
  author = {Donkers, Tim and Ziegler, Jürgen},
  title = {Leveraging Arguments in User Reviews for Generating and Explaining Recommendations},
  journal = {Datenbank-Spektrum},
  year = {2020},
  publisher = {Springer},
  address = {Berlin},
  volume = {20},
  number = {2},
  pages = {181--187},
  abstract = {Review texts constitute a valuable source for making system-generated recommendations both more accurate and more transparent. Reviews typically contain statements providing argumentative support for a given item rating that can be exploited to explain the recommended items in a personalized manner. We propose a novel method called Aspect-based Transparent Memories (ATM) to model user preferences with respect to relevant aspects and compare them to item properties to predict ratings, and, by the same mechanism, explain why an item is recommended. The ATM architecture consists of two neural memories that can be viewed as arrays of slots for storing information about users and items. The first memory component encodes representations of sentences composed by the target user while the second holds an equivalent representation for the target item based on statements of other users. An offline evaluation was performed with three datasets, showing advantages over two baselines, the well-established Matrix Factorization technique and a recent competitive representative of neural attentional recommender techniques.},
  issn = {1618-2162},
  doi = {10.1007/s13222-020-00350-y}
}


@inproceedings{ubo_mods_00154786,
  author = {Hernandez-Bocanegra, Diana C. and Donkers, Tim and Ziegler, Jürgen},
  title = {Effects of Argumentative Explanation Types on the Perception of Review-Based Recommendations},
  booktitle = {Adjunct Proceedings of the 28th ACM Conference on User Modeling, Adaptation and Personalization (UMAP ’20 Adjunct)},
  year = {2020},
  publisher = {Association for Computing Machinery (ACM)},
  address = {New York},
  pages = {219--225},
  keywords = {user study},
  abstract = {Recommender systems have achieved considerable maturity and accuracy in recent years. However, the rationale behind recommendations mostly remains opaque. Providing textual explanations based on user reviews may increase users’ perception of transparency and, by that, overall system satisfaction. However, little is known about how these explanations can be effectively and efficiently presented to the user. In the following paper, we present an empirical study conducted in the domain of hotels to investigate the effect of different textual explanation types on, among others, perceived system transparency and trustworthiness, as well as the overall assessment of explanation quality. The explanations presented to participants follow an argument-based design, which we propose to provide a rationale to support a recommendation in a structured way. Our results show that people prefer explanations that include an aggregation using percentages of other users’ opinions, over explanations that only include a brief summary of opinions. The results additionally indicate that user characteristics such as social awareness may influence the perception of explanation quality.},
  isbn = {9781450367110},
  doi = {10.1145/3386392.3399302},
  url = {https://dl.acm.org/doi/10.1145/3386392.3399302?cid=99659550942}
}


@inproceedings{ubo_mods_00148660,
  author = {Donkers, Tim and Kleemann, Timm and Ziegler, Jürgen},
  editor = {Paternò, Fabio and Oliver, Nuria},
  title = {Explaining Recommendations by Means of Aspect-Based Transparent Memories},
  booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces},
  year = {2020},
  publisher = {The Association for Computing Machinery},
  address = {New York, NY},
  pages = {166--176},
  abstract = {Recommender Systems have seen substantial progress in terms of algorithmic sophistication recently. Yet, the systems mostly act as black boxes and are limited in their capacity to explain why an item is recommended. In many cases recommendations methods are employed in scenarios where users not only rate items, but also convey their opinion on various relevant aspects, for instance by the means of textual reviews. Such user-generated content can serve as a useful source for deriving explanatory information to increase system intelligibility and, thereby, the user’s understanding. We propose a recommendation and explanation method that exploits the comprehensiveness of textual data to make the underlying criteria and mechanisms that lead to a recommendation more transparent. Concretely, the method incorporates neural memories that store aspect-related opinions extracted from raw review data. We apply attention mechanisms to transparently write and read information from memory slots. Besides customary offline experiments, we conducted an extensive user study. The results indicate that our approach achieves a higher overall quality of explanations compared to a state-of-the-art baseline. Utilizing Structural Equation Modeling, we additionally reveal three linked key factors that constitute explanation quality: Content adequacy, presentation adequacy, and linguistic adequacy.},
  isbn = {978-1-4503-7118-6},
  doi = {10.1145/3377325.3377520},
  url = {https://dl.acm.org/doi/pdf/10.1145/3377325.3377520}
}


@inproceedings{ubo_mods_00142455,
  author = {Loepp, Benedikt and Donkers, Tim and Kleemann, Timm and Ziegler, Jürgen},
  title = {Impact of Consuming Suggested Items on the Assessment of Recommendations in User Studies on Recommender Systems},
  booktitle = {Proceedings of the 28th International Joint Conference on Artificial Intelligence (IJCAI ’19)},
  year = {2019},
  publisher = {IJCAI Organization},
  pages = {6201--6205},
  keywords = {Recommender Systems},
  abstract = {User studies are increasingly considered important in research on recommender systems. Although participants typically cannot consume any of the recommended items, they are often asked to assess the quality of recommendations and of other aspects related to user experience by means of questionnaires. Not being able to listen to recommended songs or to watch suggested movies, might however limit the validity of the obtained results. Consequently, we have investigated the effect of consuming suggested items. In two user studies conducted in different domains, we showed that consumption may lead to differences in the assessment of recommendations and in questionnaire answers. Apparently, adequately measuring user experience is in some cases not possible without allowing users to consume items. On the other hand, participants sometimes seem to approximate the actual value of recommendations reasonably well depending on domain and provided information.},
  doi = {10.24963/ijcai.2019/863},
  url = {https://doi.org/10.24963/ijcai.2019/863}
}


@inproceedings{ubo_mods_00136811,
  author = {Kunkel, Johannes and Donkers, Tim and Michael, Lisa and Barbu, Catalin-Mihai and Ziegler, Jürgen},
  title = {Let Me Explain: Impact of Personal and Impersonal Explanations on Trust in Recommender Systems},
  booktitle = {Proceedings of the 37th International Conference on Human Factors in Computing Systems (CHI ’19)},
  year = {2019},
  publisher = {ACM},
  address = {New York},
  pages = {487:1--487:12},
  abstract = {Trust in a Recommender System (RS) is crucial for its overall success. However, it remains underexplored whether users trust personal recommendation sources (i.e. other humans) more than impersonal sources (i.e. conventional RS), and, if they do, whether the perceived quality of explanation provided account for the difference. We conducted an empirical study in which we compared these two sources of recommendations and explanations. Human advisors were asked to explain movies they recommended in short texts while the RS created explanations based on item similarity. Our experiment comprised two rounds of recommending. Over both rounds the quality of explanations provided by users was assessed higher than the quality of the system’s explanations. Moreover, explanation quality significantly influenced perceived recommendation quality as well as trust in the recommendation source. Consequently, we suggest that RS should provide richer explanations in order to increase their perceived recommendation quality and trustworthiness.},
  isbn = {978-1-4503-5970-2},
  doi = {10.1145/3290605.3300717},
  url = {https://doi.org/10.1145/3290605.3300717}
}


@article{ubo_mods_00109856,
  author = {Loepp, Benedikt and Donkers, Tim and Kleemann, Timm and Ziegler, Jürgen},
  title = {Interactive Recommending with Tag-Enhanced Matrix Factorization (TagMF)},
  journal = {International Journal of Human-Computer Studies},
  year = {2019},
  volume = {121},
  pages = {21--41},
  keywords = {Collaborative Filtering, Empirical studies, Human factors, Interactive recommending, Matrix Factorization, Recommender Systems, Tags, User experience, User interfaces, User profiles},
  abstract = {We introduce TagMF, a model-based Collaborative Filtering method that aims at increasing transparency and offering richer interaction possibilities in current Recommender Systems. Model-based Collaborative Filtering is currently the most popular method that predominantly uses Matrix Factorization: This technique achieves high accuracy in recommending interesting items to individual users by learning latent factors from implicit feedback or ratings the community of users provided for the items. However, the model learned and the resulting recommendations can neither be explained, nor can users be enabled to influence the recommendation process except by rating (more) items. In TagMF, we enhance a latent factor model with additional content information, specifically tags users provided for the items. The main contributions of our method are to use this integrated model to elucidate the hidden semantics of the latent factors and to let users interactively control recommendations by changing the influence of the factors through easily comprehensible tags: Users can express their interests, interactively manipulate results, and critique recommended items—at cold-start when no historical data is yet available for a new user, as well as in case a long-term profile representing the current user’s preferences already exists. To validate our method, we performed offline experiments and conducted two empirical user studies where we compared a recommender that employs TagMF against two established baselines, standard Matrix Factorization based on ratings, and a purely tag-based interactive approach. This user-centric evaluation confirmed that enhancing a model-based method with additional information positively affects perceived recommendation quality. Moreover, recommendations were considered more transparent and users were more satisfied with their final choice. 
Overall, learning an integrated model and implementing the interactive features that become possible as an extension to contemporary systems with TagMF appears beneficial for the subjective assessment of several system aspects, the level of control users are able to exert over the recommendation process, as well as user experience in general.},
  doi = {10.1016/j.ijhcs.2018.05.002},
  url = {https://doi.org/10.1016/j.ijhcs.2018.05.002}
}


@inproceedings{ubo_mods_00116566,
  author = {Loepp, Benedikt and Donkers, Tim and Kleemann, Timm and Ziegler, Jürgen},
  title = {Impact of Item Consumption on Assessment of Recommendations in User Studies},
  booktitle = {Proceedings of the 12th ACM Conference on Recommender Systems (RecSys ’18)},
  year = {2018},
  publisher = {ACM},
  address = {New York, NY, USA},
  pages = {49--53},
  keywords = {User Studies},
  abstract = {In user studies of recommender systems, participants typically cannot consume the recommended items. Still, they are asked to assess recommendation quality and other aspects related to user experience by means of questionnaires. Without having listened to recommended songs or watched suggested movies, however, this might be an error-prone task, possibly limiting validity of results obtained in these studies. In this paper, we investigate the effect of actually consuming the recommended items. We present two user studies conducted in different domains showing that in some cases, differences in the assessment of recommendations and in questionnaire results occur. Apparently, it is not always possible to adequately measure user experience without allowing users to consume items. On the other hand, depending on domain and provided information, participants sometimes seem to approximate the actual value of recommendations reasonably well.},
  isbn = {978-1-4503-5901-6},
  doi = {10.1145/3240323.3240375},
  url = {https://dl.acm.org/doi/10.1145/3240323.3240375?cid=87958660357}
}


@inproceedings{ubo_mods_00114820,
  author = {Naveed, Sidra and Donkers, Tim and Ziegler, Jürgen},
  title = {Argumentation-based explanations in recommender systems: Conceptual framework and empirical results},
  booktitle = {UMAP 2018 - Adjunct Publication of the 26th Conference on User Modeling, Adaptation and Personalization},
  year = {2018},
  publisher = {ACM},
  address = {New York, NY, USA},
  pages = {293--298},
  keywords = {User-centered},
  isbn = {9781450357845},
  doi = {10.1145/3213586.3225240}
}


@inproceedings{ubo_mods_00106122,
  author = {Kunkel, Johannes and Donkers, Tim and Barbu, Catalin-Mihai and Ziegler, Jürgen},
  title = {Trust-Related Effects of Expertise and Similarity Cues in Human-Generated Recommendations},
  booktitle = {2nd Workshop on Theory-Informed User Modeling for Tailoring and Personalizing Interfaces (HUMANIZE)},
  year = {2018},
  keywords = {Structural Equation Modeling},
  abstract = {A user’s trust in recommendations plays a central role in the acceptance or rejection of a recommendation. One factor that influences trust is the source of the recommendations. In this paper we describe an empirical study that investigates the trust-related influence of social presence arising in two scenarios: human-generated recommendations and automated recommending. We further compare visual cues indicating the expertise of a human recommendation source and its similarity with the target user, and evaluate their influence on trust. Our analysis indicates that even subtle visual cues can signal expertise and similarity effectively, thus influencing a user’s trust in recommendations. These findings suggest that automated recommender systems could benefit from the inclusion of social components–especially when conveying characteristics of the recommendation source. Thus, more informative and persuasive recommendation interfaces may be designed using such a mixed approach.},
  url = {http://ceur-ws.org/Vol-2068/humanize5.pdf}
}


@inproceedings{ubo_mods_00104370,
  author = {Donkers, Tim and Loepp, Benedikt and Ziegler, Jürgen},
  title = {Explaining Recommendations by Means of User Reviews},
  booktitle = {Proceedings of the 1st Workshop on Explainable Smart Systems (ExSS ’18)},
  year = {2018},
  keywords = {Explanations},
  abstract = {The field of recommender systems has seen substantial progress in recent years in terms of algorithmic sophistication and quality of recommendations as measured by standard accuracy metrics. Yet, the systems mainly act as black boxes for the user and are limited in their capability to explain why certain items are recommended. This is particularly true when using abstract models which do not easily lend themselves for providing explanations. In many cases, however, recommendation methods are employed in scenarios where users not only rate items, but also provide feedback in the form of tags or written product reviews. Such user-generated content can serve as a useful source for deriving explanatory information that may increase the user’s understanding of the underlying criteria and mechanisms that led to the results. In this paper, we describe a set of developments we undertook to couple such textual content with common recommender techniques. These developments have moved from integrating tags into collaborative filtering to employing topics and sentiments expressed in reviews to increase transparency and to give users more control over the recommendation process. Furthermore, we describe our current research goals and a first concept concerning extraction of more complex argumentative explanations from textual reviews and presenting them to users.},
  url = {http://ceur-ws.org/Vol-2068/exss8.pdf}
}


@inproceedings{ubo_mods_00090488,
  author = {Donkers, Tim and Loepp, Benedikt and Ziegler, Jürgen},
  title = {Sequential User-based Recurrent Neural Network Recommendations},
  booktitle = {Proceedings of the 11th ACM Conference on Recommender Systems (RecSys ’17)},
  year = {2017},
  publisher = {ACM},
  address = {New York, NY, USA},
  pages = {152--160},
  keywords = {Sequential Recommendations},
  abstract = {Recurrent Neural Networks are powerful tools for modeling sequences. They are flexibly extensible and can incorporate various kinds of information including temporal order. These properties make them well suited for generating sequential recommendations. In this paper, we extend Recurrent Neural Networks by considering unique characteristics of the Recommender Systems domain. One of these characteristics is the explicit notion of the user recommendations are specifically generated for. We show how individual users can be represented in addition to sequences of consumed items in a new type of Gated Recurrent Unit to effectively produce personalized next item recommendations. Offline experiments on two real-world datasets indicate that our extensions clearly improve objective performance when compared to state-of-the-art recommender algorithms and to a conventional Recurrent Neural Network.},
  doi = {10.1145/3109859.3109877},
  url = {https://dl.acm.org/doi/10.1145/3109859.3109877?cid=87958660357}
}


@inproceedings{ubo:72538,
  author = {Donkers, Tim and Loepp, Benedikt and Ziegler, Jürgen},
  title = {Towards Understanding Latent Factors and User Profiles by Enhancing Matrix Factorization with Tags},
  booktitle = {Poster Proceedings of the 10th ACM Conference on Recommender Systems (RecSys ’16)},
  year = {2016},
  keywords = {Explanations},
  abstract = {With the interactive recommending approach we have recently proposed, users are given more control over model-based Collaborative Filtering while the results are perceived as more transparent. Integrating the latent factors derived by Matrix Factorization with tags users provided for the items has, however, even more advantages. In this paper, we show how general understanding of the abstract factor space, and of user and item positions inside it, can benefit from the semantics introduced by considering additional information. Moreover, our approach allows us to explain the user’s (former latent) preference profile by means of tags.},
  url = {http://ceur-ws.org/Vol-1688/paper-20.pdf}
}


@inproceedings{ubo:57156,
  author = {Donkers, Tim and Loepp, Benedikt and Ziegler, Jürgen},
  title = {Merging Latent Factors and Tags to Increase Interactive Control of Recommendations},
  booktitle = {Poster Proceedings of the 9th ACM Conference on Recommender Systems (RecSys ’15)},
  year = {2015},
  abstract = {We describe a novel approach that integrates user-generated tags with standard Matrix Factorization to allow users to interactively control recommendations. The tag information is incorporated during the learning phase and relates to the automatically derived latent factors. Thus, the system can change an item’s score whenever the user adjusts a tag’s weight. We implemented a prototype and performed a user study showing that this seems to be a promising way for users to interactively manipulate the set of items recommended based on their user profile or in cold-start situations.},
  url = {http://ceur-ws.org/Vol-1441/recsys2015_poster12.pdf}
}


