@inproceedings{vdv07csc,
  title     = {Categorization and Reinforcement Learning: State Identification in Reinforcement Learning and Network Reinforcement Learning},
  author    = {Veksler, Vladislav Daniel and Gray, Wayne D. and Schoelles, Michael J.},
  editor    = {McNamara, D. S. and Trafton, J. G.},
  year      = {2007},
  booktitle = {29th Annual Meeting of the Cognitive Science Society},
  pages     = {689--694},
  publisher = {Cognitive Science Society},
  address   = {Austin, TX},
  abstract  = {We present Network Reinforcement Learning (NRL) as more efficient and robust than traditional reinforcement learning in complex environments. Combined with Configural Memory (Pearce, 1994), NRL can generalize from its experiences to novel stimuli, and learn how to deal with anomalies as well. We show how configural memory with NRL accounts for human and monkey data on a classic categorization paradigm. Finally, we argue for why the suggested mechanism is better than other reinforcement learning and categorization models for cognitive agents and AI.},
  keywords  = {artificial intelligence, categorization, category learning, cognitive architectures, cognitive modeling, configural, reinforcement learning, unsupervised learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
@comment{Duplicate of the abstract field above, kept as loose reference text:
We present Network Reinforcement Learning (NRL) as more efficient and robust than traditional reinforcement learning in complex environments. Combined with Configural Memory (Pearce, 1994), NRL can generalize from its experiences to novel stimuli, and learn how to deal with anomalies as well. We show how configural memory with NRL accounts for human and monkey data on a classic categorization paradigm. Finally, we argue for why the suggested mechanism is better than other reinforcement learning and categorization models for cognitive agents and AI.}