@article {93, title = {Ultra-fast object recognition from few spikes}, number = {2005-022}, year = {2005}, month = {07/2005}, pages = {1-31}, institution = {MIT}, address = {Cambridge, MA}, abstract = {

Understanding the complex brain computations leading to object recognition requires quantitatively characterizing the information represented in inferior temporal cortex (IT), the highest stage of the primate visual stream. A read-out technique based on a trainable classifier is used to characterize the neural coding of selectivity and invariance at the population level. The activity of very small populations of independently recorded IT neurons (~100 randomly selected cells) over very short time intervals (as small as 12.5 ms) contains surprisingly accurate and robust information about both object 'identity' and 'category', which is furthermore highly invariant to object position and scale. Significantly, selectivity and invariance are present even for novel objects, indicating that these properties arise from the intrinsic circuitry and do not require object-specific learning. Within the limits of the technique, there is no detectable difference in the latency or temporal resolution of the IT information supporting so-called 'categorization' (a.k.a. basic level) and 'identification' (a.k.a. subordinate level) tasks. Furthermore, other information, in particular information about stimulus location and scale, can also be read out from the same small population of IT neurons. These results show how it is possible to decode invariant object information rapidly, accurately and robustly from a small population of IT neurons and provide insights into the nature of the neural code for different kinds of object-related information.

}, keywords = {AI, inferior temporal cortex, neural coding, object recognition}, issn = {2005-022}, url = {https://dspace.mit.edu/handle/1721.1/30556}, author = {Hung, Chou P. and Kreiman, Gabriel and Poggio, Tomaso and DiCarlo, James J.} }
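The read-out approach summarized in the abstract (a trainable classifier applied to population spike counts in short time bins) can be illustrated with a minimal sketch. The code below is not the authors' pipeline: the data are synthetic, and the unit count, bin width, number of categories, and choice of a linear SVM decoder are all assumptions made for illustration; cross-validated accuracy stands in for the read-out measure.

```python
# Minimal population read-out sketch (synthetic data, illustrative only).
# Assumptions: 100 units, 8 object categories, 32 trials per category,
# one short time bin summarized as a spike count per unit, and a linear
# SVM as the trainable classifier. None of this reproduces the recordings.
import numpy as np
from sklearn.svm import LinearSVC
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

rng = np.random.default_rng(0)
n_units, n_categories, trials_per_category = 100, 8, 32

# Each category gets a weakly category-selective mean firing profile (hypothetical).
category_tuning = rng.gamma(shape=2.0, scale=1.0, size=(n_categories, n_units))

X, y = [], []
for cat in range(n_categories):
    # Poisson spike counts in a single short bin, per trial and unit.
    counts = rng.poisson(lam=category_tuning[cat],
                         size=(trials_per_category, n_units))
    X.append(counts)
    y.append(np.full(trials_per_category, cat))

X = np.vstack(X).astype(float)
y = np.concatenate(y)

# Trainable-classifier read-out: cross-validated categorization accuracy
# from the population response in one time bin.
decoder = make_pipeline(StandardScaler(), LinearSVC())
scores = cross_val_score(decoder, X, y, cv=5)
print(f"Decoding accuracy: {scores.mean():.2f} "
      f"(chance = {1 / n_categories:.2f})")
```

In this sketch, decoding accuracy well above chance from a single short bin of ~100 simulated units mirrors the kind of population-level read-out the abstract describes; swapping in recorded spike counts and sweeping the bin onset would give latency and temporal-resolution curves analogous to those reported.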