@incollection{cogprints493,
editor = {O. Omidvar and P. van der Smagt},
title = {Visual feedback in motion},
author = {P. van der Smagt and F. Groen},
publisher = {Academic Press},
address = {Boston, Massachusetts},
year = {1997},
pages = {37--73},
booktitle = {Neural Systems for Robotics},
url = {http://cogprints.org/493/},
abstract = {In this chapter we introduce a method for model-free monocular visual guidance of a robot arm. The robot arm, with a single camera in its end effector, should be positioned above a stationary target. It is shown that a trajectory can be planned in visual space by using components of the optic flow, and that this trajectory can be translated to joint torques by a self-learning neural network. No model of the robot, camera, or environment is used. The method reaches a high grasping accuracy after only a few trials.}
}