@incollection{cogprints1596,
editor = {V. Honavar and L. Uhr},
title = {Learned Categorical Perception in Neural Nets: Implications for Symbol Grounding},
author = {Stevan Harnad and Stephen J. Hanson and Joseph Lubin},
publisher = {Academic Press},
year = {1995},
pages = {191--206},
booktitle = {Symbol Processors and Connectionist Network Models in Artificial Intelligence and Cognitive Modelling: Steps Toward Principled Integration},
keywords = {Neural nets, symbol grounding, categorical perception},
url = {http://cogprints.org/1596/},
abstract = {After people learn to sort objects into categories, they see them differently. Members of
the same category look more alike, and members of different categories look more different. This
phenomenon of within-category compression and between-category separation in similarity space is
called categorical perception (CP). It is exhibited by human subjects, animals, and neural net models.
In backpropagation nets trained first to auto-associate 12 stimuli varying along a one-dimensional
continuum and then to sort them into 3 categories, CP arises as a natural side effect of four
factors: (1) maximal interstimulus separation in hidden-unit space during auto-association learning, (2)
movement toward linear separability during categorization learning, (3) an inverse-distance repulsive force
exerted by the between-category boundary, and (4) the modulating effects of input iconicity, especially
in interpolating CP to untrained regions of the continuum. Once similarity space has been "warped" in
this way, the compressed and separated "chunks" have symbolic labels, which could then be combined
into symbol strings that constitute propositions about objects. The meanings of such symbolic
representations would be "grounded" in the system's capacity to pick out from their sensory
projections the object categories that the propositions were about.}
}
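
The sketch below is an illustrative toy, not the authors' code or network: a small backpropagation net is trained first to auto-associate 12 stimuli on a one-dimensional continuum and then to sort them into 3 categories, after which mean pairwise hidden-unit distances within and between categories are compared. The thermometer input coding, layer sizes, learning rate, and epoch counts are all assumptions made for this demonstration and do not reproduce the paper's setup.

# Toy illustration (assumed setup, not the paper's): auto-association followed
# by categorization, then a check of within- vs. between-category distances
# in hidden-unit space.
import numpy as np

rng = np.random.default_rng(0)

# 12 stimuli on a one-dimensional continuum, coded here as "thermometer"
# (iconic) vectors; the coding scheme is an illustrative assumption.
n_stim, n_in, n_hidden = 12, 12, 8
X = np.tril(np.ones((n_stim, n_in)))      # stimulus i activates input units 0..i
labels = np.repeat([0, 1, 2], 4)          # 3 categories of 4 stimuli each
Y_cat = np.eye(3)[labels]                 # one-hot category targets

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# Shared input-to-hidden weights; each task gets its own output head.
W1 = rng.normal(0, 0.3, (n_in, n_hidden))
b1 = np.zeros(n_hidden)

def hidden(X):
    return sigmoid(X @ W1 + b1)

def train_head(T, n_out, epochs, lr=0.1):
    """Backprop on squared error through a sigmoid output head,
    updating both the head and the shared hidden layer."""
    global W1, b1
    W2 = rng.normal(0, 0.3, (n_hidden, n_out))
    b2 = np.zeros(n_out)
    for _ in range(epochs):
        H = hidden(X)
        O = sigmoid(H @ W2 + b2)
        dO = (O - T) * O * (1 - O)          # output-layer delta
        dH = (dO @ W2.T) * H * (1 - H)      # hidden-layer delta
        W2 -= lr * H.T @ dO; b2 -= lr * dO.sum(0)
        W1 -= lr * X.T @ dH; b1 -= lr * dH.sum(0)

def compression_stats(H):
    """Mean pairwise hidden-unit distance within vs. between categories."""
    d = np.linalg.norm(H[:, None, :] - H[None, :, :], axis=-1)
    same = labels[:, None] == labels[None, :]
    off_diag = ~np.eye(n_stim, dtype=bool)
    return d[same & off_diag].mean(), d[~same].mean()

train_head(X, n_in, epochs=3000)            # phase 1: auto-association
w_before, b_before = compression_stats(hidden(X))
train_head(Y_cat, 3, epochs=3000)           # phase 2: categorization
w_after, b_after = compression_stats(hidden(X))

print(f"before categorization: within={w_before:.3f} between={b_before:.3f}")
print(f"after  categorization: within={w_after:.3f} between={b_after:.3f}")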