@article{Simoncini2012More,
    abstract = {Moving objects generate motion information at different scales, but it is not known how the brain pools all of this information to reconstruct object speed and whether pooling depends on the purpose for which the information will be used. Here the authors find task-dependent differences in pooling that can be explained by an adaptive gain control mechanism.},
    author = {Simoncini, Claudio and Perrinet, Laurent U. and Montagnini, Anna and Mamassian, Pascal and Masson, Guillaume S.},
    date-added = {2012-11-30 09:34:58},
    day = {30},
    doi = {10.1038/nn.3229},
    issn = {1097-6256},
    journal = {Nature Neuroscience},
    keywords = {active\_eye\_movements, eye\_movements, freemove, gain\_control, pursuit, sanz12jnp, spem},
    month = sep,
    number = {11},
    pages = {1596--1603},
    priority = {0},
    publisher = {Nature Publishing Group},
    title = {More is not always better: adaptive gain control explains dissociation between perception and action},
    url = {http://dx.doi.org/10.1038/nn.3229},
    volume = {15},
    year = {2012}
}

@article{Stocker06,
    abstract = {Human visual speed perception is qualitatively consistent with a Bayesian observer that optimally combines noisy measurements with a prior preference for lower speeds. Quantitative validation of this model, however, is difficult because the precise noise characteristics and prior expectations are unknown. Here, we present an augmented observer model that accounts for the variability of subjective responses in a speed discrimination task. This allowed us to infer the shape of the prior probability as well as the internal noise characteristics directly from psychophysical data. For all subjects, we found that the fitted model provides an accurate description of the data across a wide range of stimulus parameters. The inferred prior distribution shows significantly heavier tails than a Gaussian, and the amplitude of the internal noise is approximately proportional to stimulus speed and depends inversely on stimulus contrast. The framework is general and should prove applicable to other experiments and perceptual modalities.},
    author = {Stocker, Alan A. and Simoncelli, Eero P.},
    date-added = {2012-07-16 15:45:28},
    day = {19},
    doi = {10.1038/nn1669},
    issn = {1097-6256},
    journal = {Nature Neuroscience},
    keywords = {freemove, khoei12jpp, motion, motion-clouds, motion\_clouds, neural, noise, prior, psychophysics, sanz12jnp},
    month = mar,
    number = {4},
    pages = {578--585},
    priority = {5},
    title = {Noise characteristics and prior expectations in human visual speed perception},
    url = {http://www.nature.com/neuro/journal/v9/n4/abs/nn1669.html},
    volume = {9},
    year = {2006}
}

@article{Sanz12,
    abstract = {Choosing an appropriate set of stimuli is essential to characterize the response of a sensory system to a particular functional dimension, such as the eye movement following the motion of a visual scene. Here, we describe a framework to generate random texture movies with controlled information content, i.e., Motion Clouds. These stimuli are defined using a generative model that is based on controlled experimental parametrization. We show that Motion Clouds correspond to dense mixing of localized moving gratings with random positions. Their global envelope is similar to natural-like stimulation with an approximate full-field translation corresponding to a retinal slip. We describe the construction of these stimuli mathematically and propose an open-source Python-based implementation. Examples of the use of this framework are shown. We also propose extensions to other modalities such as color vision, touch, and audition.},
    author = {Sanz Leon, P. and Vanzetta, I. and Masson, G. S. and Perrinet, L. U.},
    date-added = {2012-04-10 12:07:28},
    day = {14},
    doi = {10.1152/jn.00737.2011},
    issn = {1522-1598},
    journal = {Journal of Neurophysiology},
    keywords = {sanz12jnp},
    month = mar,
    number = {11},
    pages = {3217--3226},
    pmid = {22423003},
    priority = {0},
    publisher = {American Physiological Society},
    title = {Motion clouds: model-based stimulus synthesis of natural-like random textures for the study of motion perception},
    url = {http://dx.doi.org/10.1152/jn.00737.2011},
    volume = {107},
    year = {2012}
}

@inproceedings{Perrinet11sfn,
    abstract = {Oriented edges in images of natural scenes tend to be aligned in collinear or co-circular arrangements, with lines and smooth curves more common than other possible arrangements of edges (Geisler et al., Vis Res 41:711-24, 2001). The visual system appears to take advantage of this prior information, and human contour detection and grouping performance is well predicted by such an "association field" (Field et al., Vis Res 33:173-93, 1993). One possible candidate substrate for implementing an association field in mammals is the set of long-range lateral connections between neurons in the primary visual cortex (V1), which could act to facilitate detection of contours matching the association field, and/or inhibit detection of other contours (Choe and Miikkulainen, Biol Cyb 90:75-88, 2004). To fill this role, the lateral connections would need to be orientation specific and aligned along contours, and indeed such an arrangement has been found in tree shrew primary visual cortex (Bosking et al., J Neurosci 17:2112-27, 1997). However, it is not yet known whether these patterns develop as a result of visual experience, or are simply hard-wired to be appropriate for the statistics of natural scenes. To investigate this issue, we examined the properties of the visual environment of laboratory animals, to determine whether the observed connection patterns are more similar to the statistics of the rearing environment or of a natural habitat. Specifically, we analyzed the cooccurence statistics of edge elements in images of natural scenes, and compared them to corresponding statistics for images taken from within the rearing environment of the animals in the Bosking et al. (1997) study. We used a modified version of the algorithm from Geisler et al. (2001), with a more general edge extraction algorithm that uses sparse coding to avoid multiple responses to a single edge. Collinearity and co-circularity results for natural images replicated qualitatively the results from Geisler et al. (2001), confirming that prior information about continuations appeared consistently in natural images. However, we find that the largely man-made environment in which these animals were reared has a significantly higher probability of collinear edge elements. We thus predict that if the lateral connection patterns are due to visual experience, the patterns in wild-raised tree shrews would be very different from those measured by Bosking et al. (1997), with shorter-range correlations and less emphasis on collinear continuations. This prediction can be tested in future experiments on matching groups of animals reared in different environments. {W.H}. Bosking and Y. Zhang and B. Schofield and D. Fitzpatrick (1997) Orientation selectivity and the arrangement of horizontal connections in tree shrew striate cortex Journal of Neuroscience 17:2112-27. {E.M}. Callaway and {L.C}. Katz (1990) Emergence and refinement of clustered horizontal connections in cat striate cortex. Journal of Neuroscience 10:1134–53. Y. Choe and R. Miikkulainen (2004) Contour integration and segmentation with self-organized lateral connections Biological Cybernetics 90:75-88. {D.J}. Field, A. Hayes, and {R.F}. Hess (1993) Contour integration by the human visual system: Evidence for a local "association field", Vision Research 33:173–93. {W.S}. Geisler, {J.S}. Perry, {B.J}. Super, and {D.P}. Gallogly (2001) Edge co-occurrence in natural images predicts contour grouping performance. Vision Research 41:711-24.},
    author = {Perrinet, Laurent and Fitzpatrick, David and Bednar, James A.},
    booktitle = {Society for Neuroscience Abstracts},
    date-added = {2012-03-19 15:06:35},
    address = {Washington, DC},
    keywords = {sanz12jnp},
    number = {Program No. 530.04},
    priority = {0},
    title = {Edge statistics in natural images versus laboratory animal environments: implications for understanding lateral connectivity in {V1}},
    url = {http://invibe.net/LaurentPerrinet/Publications/Perrinet11sfn},
    year = {2011}
}

@article{TsO90,
    abstract = {A high spatial resolution optical imaging system was developed to visualize cerebral cortical activity in vivo. This method is based on activity-dependent intrinsic signals and does not use voltage-sensitive dyes. Images of the living monkey striate (V1) and extrastriate (V2) visual cortex, taken during visual stimulation, were analyzed to yield maps of the distribution of cells with various functional properties. The cytochrome oxidase--rich blobs of V1 and the stripes of V2 were imaged in the living brain. In V2, no ocular dominance organization was seen, while regions of poor orientation tuning colocalized to every other cytochrome oxidase stripe. The orientation tuning of other regions of V2 appeared organized as modules that are larger and more uniform than those in V1.},
    author = {Ts'o, D. Y. and Frostig, R. D. and Lieke, E. E. and Grinvald, A.},
    date-added = {2011-11-23 08:53:19},
    doi = {10.1126/science.2165630},
    eprint = {http://www.sciencemag.org/content/249/4967/417.full.pdf},
    journal = {Science},
    keywords = {sanz12jnp},
    number = {4967},
    pages = {417--420},
    priority = {2},
    title = {Functional organization of primate visual cortex revealed by high resolution optical imaging},
    url = {http://www.sciencemag.org/content/249/4967/417.abstract},
    volume = {249},
    year = {1990}
}

@techreport{sanz11_sup,
    author = {Sanz-Leon, Paula},
    date-added = {2011-11-23 08:53:18},
    institution = {INT},
    keywords = {sanz12jnp},
    month = nov,
    priority = {2},
    title = {Supplementary Material - Motion Clouds},
    year = {2011}
}

@article{Rust2005Spatiotemporal,
    abstract = {Neurons in primary visual cortex (V1) are commonly classified as simple or complex based upon their sensitivity to the sign of stimulus contrast. The responses of both cell types can be described by a general model in which the outputs of a set of linear filters are nonlinearly combined. We estimated the model for a population of V1 neurons by analyzing the mean and covariance of the spatiotemporal distribution of random bar stimuli that were associated with spikes. This analysis reveals an unsuspected richness of neuronal computation within V1. Specifically, simple and complex cell responses are best described using more linear filters than the one or two found in standard models. Many filters revealed by the model contribute suppressive signals that appear to have a predominantly divisive influence on neuronal firing. Suppressive signals are especially potent in direction-selective cells, where they reduce responses to stimuli moving in the nonpreferred direction.},
    address = {Center for Neural Science and New York University, New York, New York 10003.},
    author = {Rust, Nicole C. and Schwartz, Odelia and Movshon, J. Anthony and Simoncelli, Eero P.},
    date-added = {2011-11-23 08:53:17},
    day = {16},
    doi = {10.1016/j.neuron.2005.05.021},
    journal = {Neuron},
    keywords = {area-v1, sanz12jnp},
    month = jun,
    number = {6},
    pages = {945--956},
    priority = {3},
    title = {Spatiotemporal Elements of Macaque {V1} Receptive Fields},
    url = {http://dx.doi.org/10.1016/j.neuron.2005.05.021},
    volume = {46},
    year = {2005}
}

@article{Perrone01,
    author = {Perrone, John A. and Thiele, Alexander},
    date-added = {2011-11-23 08:53:17},
    journal = {Nat. Neurosci.},
    keywords = {sanz12jnp},
    month = may,
    number = {5},
    pages = {526--532},
    priority = {2},
    title = {Speed skills: measuring the visual speed analyzing properties of primate {MT} neurons},
    volume = {4},
    year = {2001}
}

@article{Perrone02,
    author = {Perrone, John A. and Thiele, Alexander},
    date-added = {2011-11-23 08:53:17},
    doi = {10.1016/S0042-6989(02)00029-9},
    journal = {Vision Res.},
    keywords = {motion, sanz12jnp},
    number = {8},
    pages = {1035--1051},
    priority = {2},
    title = {A model of speed tuning in {MT} neurons},
    url = {http://www.sciencedirect.com/science/article/pii/S0042698902000299},
    volume = {42},
    year = {2002}
}

@article{Peirce07,
    author = {Peirce, Jonathan W.},
    date-added = {2011-11-23 08:53:17},
    doi = {10.1016/j.jneumeth.2006.11.017},
    journal = {J. Neurosci. Methods},
    keywords = {sanz12jnp, vision},
    number = {1-2},
    pages = {8--13},
    priority = {2},
    title = {{PsychoPy}: Psychophysics software in Python},
    url = {http://www.sciencedirect.com/science/article/pii/S0165027006005772},
    volume = {162},
    year = {2007}
}

@article{Neri98,
    abstract = {One of the more stunning examples of the resourcefulness of human vision is the ability to see 'biological motion', which was first shown with an adaptation of earlier cinematic work: illumination of only the joints of a walking person is enough to convey a vivid, compelling impression of human animation, although the percept collapses to a jumble of meaningless lights when the walker stands still. The information is sufficient to discriminate the sex and other details of the walker, and can be interpreted by young infants. Here we measure the ability of the visual system to integrate this type of motion information over space and time, and compare this capacity with that for viewing simple translational motion. Sensitivity to biological motion increases rapidly with the number of illuminated joints, far more rapidly than for simple motion. Furthermore, this information is summed over extended temporal intervals of up to 3 seconds (eight times longer than for simple motion). The steepness of the summation curves indicates that the mechanisms that analyse biological motion do not integrate linearly over space and time with constant efficiency, as may occur for other forms of complex motion, but instead adapt to the nature of the stimulus.},
    author = {Neri, P. and Morrone, M. C. and Burr, D. C.},
    date-added = {2011-11-23 08:53:16},
    day = {29},
    doi = {10.1038/27661},
    journal = {Nature},
    keywords = {motion\_biological, sanz12jnp},
    month = oct,
    number = {6705},
    pages = {894--896},
    priority = {2},
    title = {Seeing biological motion.},
    url = {http://dx.doi.org/10.1038/27661},
    volume = {395},
    year = {1998}
}

@article{Masson02,
    author = {Masson, Guillaume S. and Castet, E.},
    date-added = {2011-11-23 08:53:16},
    journal = {Journal of Neuroscience},
    keywords = {sanz12jnp},
    pages = {5149--5163},
    priority = {2},
    title = {Parallel motion processing for the initiation of short-latency ocular following in humans.},
    volume = {22},
    year = {2002}
}

@article{Jasinschi92,
    abstract = {Perceptual motion transparency occurs whenever two or more patterns are seen moving at different depth levels, such that we can see one pattern move across the others, and perceptual motion coherence occurs when we see a single motion. We present here a model of perceptual motion transparency and coherence that consists of three stages: (i) measure the normal velocity along contours or the velocity of features such as corners or line end points; (ii) take the intersection, in velocity space, of all possible pairs of constraint lines associated with the normal velocity components; and (iii) combine the results of steps (i) and (ii) in the velocity histogram, which is the plot of the total number of votes for each velocity in velocity space. For two patterns we perceive motion coherence, transparency, or a mixture of both types of motion depending on whether the velocity histogram is unimodal, bimodal, or trimodal. According to our model we perceive motion transparency or coherence depending on the total number of prominent peaks of the velocity histogram, where each peak is located at the position corresponding to the velocity of one of the patterns or of coherent motion. We show that the number of prominent peaks in the velocity histogram depends on the error in the measurement of the local velocity and on the relative orientation of the local velocities along contours; this relative orientation encodes contour shape. Our model differs from current motion theories in that it describes the perception of motion not as a result of local velocity extraction but instead as the result of the integration of local velocity and geometrical shape information across different points of the image and across superimposed patterns. This model allows for the occurrence of mixed motion perception, which arises from the combination of the velocity information associated with motion transparency and coherence. We have tested this model through computational and psychophysical experiments done with line patterns. As a result of these experiments we conjecture that the human visual system may use at least three stages to process image velocity.},
    author = {Jasinschi, R. and Rosenfeld, A. and Sumit, K.},
    date-added = {2011-11-23 08:53:15},
    doi = {10.1364/JOSAA.9.001865},
    journal = {J. Opt. Soc. Am. A},
    keywords = {sanz12jnp},
    month = nov,
    number = {11},
    pages = {1865--1879},
    priority = {2},
    publisher = {OSA},
    title = {Perceptual motion transparency: the role of geometrical information},
    url = {http://josaa.osa.org/abstract.cfm?URI=josaa-9-11-1865},
    volume = {9},
    year = {1992}
}

@article{Burr11,
    author = {Burr, David and Thompson, Peter},
    date-added = {2011-11-23 08:53:12},
    journal = {Vision Res.},
    keywords = {sanz12jnp},
    number = {13},
    pages = {1431--1456},
    priority = {2},
    publisher = {Elsevier Ltd},
    title = {Motion psychophysics: 1985-2010},
    url = {http://www.ncbi.nlm.nih.gov/pubmed/21324335},
    volume = {51},
    year = {2011}
}

@article{Bradley05,
    abstract = {The small visual area known as {MT} or V5 has played a major role in our understanding of the primate cerebral cortex. This area has been historically important in the concept of cortical processing streams and the idea that different visual areas constitute highly specialized representations of visual information. {MT} has also proven to be a fertile culture dish--full of direction- and disparity-selective neurons--exploited by many labs to study the neural circuits underlying computations of motion and depth and to examine the relationship between neural activity and perception. Here we attempt a synthetic overview of the rich literature on {MT} with the goal of answering the question, What does {MT} do?},
    address = {Department of Neurobiology, Harvard Medical School, Boston, MA 02115-5701, USA. rborn@hms.harvard.edu},
    author = {Born, Richard T. and Bradley, David C.},
    date-added = {2011-11-23 08:53:12},
    doi = {10.1146/annurev.neuro.26.041002.131052},
    journal = {Ann. Rev. of Neurosci.},
    keywords = {sanz12jnp},
    number = {1},
    pages = {157--189},
    priority = {2},
    title = {Structure and function of visual area {MT}.},
    url = {http://dx.doi.org/10.1146/annurev.neuro.26.041002.131052},
    volume = {28},
    year = {2005}
}

@article{Blasdel86,
    author = {Blasdel, Gary G. and Salama, Guy},
    date-added = {2011-11-23 08:53:12},
    doi = {10.1038/321579a0},
    journal = {Nature},
    keywords = {sanz12jnp},
    number = {6070},
    pages = {579--585},
    priority = {2},
    title = {Voltage-sensitive dyes reveal a modular organization in monkey striate cortex},
    url = {http://dx.doi.org/10.1038/321579a0},
    volume = {321},
    year = {1986}
}

@article{Nishimoto11,
    abstract = {Area {MT} has been an important target for studies of motion processing. However, previous neurophysiological studies of {MT} have used simple stimuli that do not contain many of the motion signals that occur during natural vision. In this study we sought to determine whether views of area {MT} neurons developed using simple stimuli can account for {MT} responses under more naturalistic conditions. We recorded responses from macaque area {MT} neurons during stimulation with naturalistic movies. We then used a quantitative modeling framework to discover which specific mechanisms best predict neuronal responses under these challenging conditions. We find that the simplest model that accurately predicts responses of {MT} neurons consists of a bank of V1-like filters, each followed by a compressive nonlinearity, a divisive nonlinearity, and linear pooling. Inspection of the fit models shows that the excitatory receptive fields of {MT} neurons tend to lie on a single plane within the three-dimensional spatiotemporal frequency domain, and suppressive receptive fields lie off this plane. However, most excitatory receptive fields form a partial ring in the plane and avoid low temporal frequencies. This receptive field organization ensures that most {MT} neurons are tuned for velocity but do not tend to respond to ambiguous static textures that are aligned with the direction of motion. In sum, {MT} responses to naturalistic movies are largely consistent with predictions based on simple stimuli. However, models fit using naturalistic stimuli reveal several novel properties of {MT} receptive fields that had not been shown in prior experiments.},
    author = {Nishimoto, Shinji and Gallant, Jack L.},
    date-added = {2011-11-01 11:59:54},
    day = {12},
    doi = {10.1523/JNEUROSCI.6801-10.2011},
    issn = {1529-2401},
    journal = {J. Neurosci.},
    keywords = {area-mt, motion\_detection, natural\_scenes, sanz12jnp},
    month = oct,
    number = {41},
    pages = {14551--14564},
    pmcid = {PMC3338855},
    pmid = {21994372},
    priority = {5},
    publisher = {Society for Neuroscience},
    title = {A Three-Dimensional Spatiotemporal Receptive Field Model Explains Responses of Area {MT} Neurons to Naturalistic Movies},
    url = {http://dx.doi.org/10.1523/JNEUROSCI.6801-10.2011},
    volume = {31},
    year = {2011}
}

@article{Torralba03,
    abstract = {In this paper we study the statistical properties of natural images belonging to different categories and their relevance for scene and object categorization tasks. We discuss how second-order statistics are correlated with image categories, scene scale and objects. We propose how scene categorization could be computed in a feedforward manner in order to provide top-down and contextual information very early in the visual processing chain. Results show how visual categorization based directly on low-level features, without grouping or segmentation stages, can benefit object localization and identification. We show how simple image statistics can be used to predict the presence and absence of objects in the scene before exploring the image.},
    author = {Torralba, Antonio and Oliva, Aude},
    date-added = {2011-10-10 09:28:18},
    day = {1},
    doi = {10.1088/0954-898X\_14\_3\_302},
    journal = {Network: Computation in Neural Systems},
    keywords = {association\_field, assofield, categorization, natural, natural\_scenes, perrinet11sfn, sanz12jnp, statistics},
    month = jan,
    number = {3},
    pages = {391--412},
    priority = {2},
    publisher = {Informa Clin Med},
    title = {Statistics of natural image categories},
    url = {http://dx.doi.org/10.1088/0954-898X\_14\_3\_302},
    volume = {14},
    year = {2003}
}

@article{PhillipsTodd2010,
    author = {Phillips, F. and Todd, J. T.},
    date-added = {2011-07-19 14:56:39},
    journal = {J. Vis.},
    keywords = {features, frequency, phase, recognition, sanz12jnp, spatial, spectrum, texture},
    number = {6},
    pages = {6},
    priority = {2},
    title = {Texture discrimination based on global feature alignments},
    volume = {10},
    year = {2010}
}

@article{VictorComte1996,
    author = {Victor, J. D. and Conte, M. M.},
    date-added = {2011-07-19 14:56:38},
    journal = {Vision Res.},
    keywords = {discrimination, isodipole, phase, sanz12jnp, spectrum, texture, vep},
    number = {11},
    pages = {1615--1631},
    priority = {2},
    title = {The role of high-order phase correlations in texture processing},
    volume = {36},
    year = {1996}
}

@article{Hansen06,
    author = {Hansen, B. C. and Hess, R. F.},
    date-added = {2011-07-19 14:56:38},
    journal = {J. Vis.},
    keywords = {association\_field, assofield, contour, field, information, integration, perrinet11sfn, phase, sanz12jnp, segmentation, texture},
    number = {5},
    pages = {594--615},
    priority = {2},
    title = {The role of spatial phase in texture segmentation and contour integration},
    volume = {6},
    year = {2006}
}

@article{Hess03,
    author = {Hess, R. F. and Hayes, A. and Field, D. J.},
    date-added = {2011-07-19 14:56:38},
    journal = {J. Physiol. Paris},
    keywords = {association\_field, assofield, contour, discrimination, field, integration, perrinet11sfn, sanz12jnp, segmentation, texture},
    number = {2-3},
    pages = {105--119},
    priority = {2},
    title = {Contour integration and cortical processing},
    volume = {97},
    year = {2003}
}

@article{Priebe06,
    author = {Priebe, N. and Lisberger, S. and Movshon, J. Anthony},
    date-added = {2011-07-19 14:56:38},
    journal = {J. Neurosci.},
    keywords = {cells, complex, sanz12jnp, spatiotemporal-filtering, speed, tuning, v1},
    pages = {2941--2950},
    priority = {2},
    title = {Tuning for spatiotemporal frequency and speed in directionally selective neurons of macaque striate cortex.},
    volume = {26},
    year = {2006}
}

@article{Priebe03,
    author = {Priebe, N. and Cassanello, C. and Lisberger, S.},
    date-added = {2011-07-19 14:56:38},
    journal = {J. Neurosci.},
    keywords = {area-mt, dots, random, sanz12jnp, spatiotemporal-filtering, speed, tuning},
    pages = {5650--5661},
    priority = {2},
    title = {The neural representation of speed in macaque area {MT/V5}.},
    volume = {23},
    year = {2003}
}

@article{smith90,
    author = {Smith, A. T. and Edgar, G. K.},
    date-added = {2011-07-19 14:56:38},
    journal = {Vision Res.},
    keywords = {discrimination, frequency, motion, motion\_perception, perception, sanz12jnp, spatial, speed, temporal},
    pages = {1467--1474},
    priority = {2},
    title = {The influence of spatial frequency on perceived temporal frequency and perceived speed.},
    volume = {30},
    year = {1990}
}

@article{watson94,
    author = {Watson, A. B. and Eckert, M. P.},
    date-added = {2011-07-19 14:56:38},
    journal = {J. Opt. Soc. Am. A},
    keywords = {corrugated, frequency, gradients, motion, pattern, sanz12jnp, spatial},
    pages = {486--505},
    priority = {2},
    title = {Motion-contrast sensitivity: visibility of motion gradients of various spatial frequencies.},
    volume = {11},
    year = {1994}
}

@misc{Psychopy,
    date-added = {2011-07-07 09:50:54},
    keywords = {python, sanz12jnp},
    priority = {0},
    title = {{PsychoPy}}
}

@article{Visionegg,
    author = {Straw, A. D.},
    date-added = {2011-07-07 09:50:54},
    journal = {Front Neuroinformatics},
    keywords = {python, sanz12jnp},
    priority = {0},
    title = {Vision Egg: An {Open-Source} Library for Realtime Visual Stimulus Generation},
    year = {2008}
}

@article{Pelli97,
    author = {Pelli, Denis G.},
    date-added = {2011-07-07 09:50:54},
    doi = {10.1163/156856897X00366},
    journal = {Spatial Vision},
    keywords = {sanz12jnp, stimulus},
    pages = {437--442},
    priority = {0},
    title = {The {VideoToolbox} software for visual psychophysics: transforming numbers into movies},
    url = {http://dx.doi.org/10.1163/156856897X00366},
    volume = {10},
    year = {1997}
}

@article{Brainard97,
    author = {Brainard, David H.},
    date-added = {2011-07-07 09:50:53},
    doi = {10.1163/156856897X00357},
    journal = {Spatial Vision},
    keywords = {sanz12jnp},
    pages = {433--436},
    priority = {0},
    title = {The Psychophysics Toolbox},
    url = {http://dx.doi.org/10.1163/156856897X00357},
    volume = {10},
    year = {1997}
}

@article{Conway07,
    author = {Conway, B. R. and Moeller, S. and Tsao, D. Y.},
    date-added = {2011-07-07 09:50:53},
    journal = {Neuron},
    keywords = {sanz12jnp},
    month = nov,
    number = {3},
    priority = {0},
    title = {Specialized Color Modules in Macaque Extrastriate Cortex},
    volume = {56},
    year = {2007}
}

@article{Geisler08,
    abstract = {The environments in which an organism lives and the tasks it performs within those environments shape its perceptual systems through evolution and experience. This is an obvious statement, but it implies several fundamental components of research that are needed if we are going to gain a deep understanding of perceptual systems. The first is to identify the natural tasks and sub-tasks that are performed by the organism under natural conditions. The second is to measure and analyze those specific environmental properties (natural scene statistics) relevant for performing the tasks. The third is a computational analysis to determine how a rational (ideal) perceptual system would exploit the measured environmental properties to perform the tasks. This component is critical because it provides insight into the information contained in the natural stimuli and it can suggest principled hypotheses for the neural mechanisms the organism might use to exploit that information. The fourth component is to formulate specific hypotheses for neural mechanisms, based on the first three components, and test them in physiological and behavioral studies that capture the essence of the natural task. This general approach is illustrated with a study of contour grouping that combines measurements of natural scene statistics, derivation of ideal Bayesian observers that exploit those statistics, and psychophysical experiments that compare human and ideal performance. This study and other recent studies demonstrate the great potential of ``natural systems analysis'' for producing advances in behavioral science and systems neuroscience.},
    author = {Geisler, Wilson S. and Ringach, Dario},
    date-added = {2011-07-07 09:50:53},
    doi = {10.1017/S0952523808081005},
    journal = {Visual Neurosci},
    keywords = {sanz12jnp},
    pages = {1--3},
    priority = {0},
    title = {Natural systems analysis},
    url = {http://dx.doi.org/10.1017/S0952523808081005},
    volume = {26},
    year = {2009}
}

@book{DeValois1988Spatial,
    author = {DeValois, Russell L. and DeValois, Karen K.},
    date-added = {2011-07-04 16:50:41},
    day = {24},
    isbn = {0195050193},
    keywords = {sanz12jnp},
    month = mar,
    priority = {2},
    publisher = {Oxford University Press, USA},
    title = {Spatial Vision},
    url = {http://www.worldcat.org/isbn/0195050193},
    year = {1988}
}

@article{Dan96,
    author = {Dan, Yang and Atick, Joseph J. and Reid, R. C.},
    date-added = {2011-07-04 14:38:14},
    journal = {J. Neurosci.},
    keywords = {sanz12jnp},
    month = may,
    number = {10},
    pages = {3351--3362},
    priority = {2},
    title = {Efficient coding of natural scenes in the lateral geniculate nucleus: experimental test of a computational theory},
    volume = {16},
    year = {1996}
}

@article{Rust06,
    abstract = {Neurons in area {MT} (V5) are selective for the direction of visual motion. In addition, many are selective for the motion of complex patterns independent of the orientation of their components, a behavior not seen in earlier visual areas. We show that the responses of {MT} cells can be captured by a linear-nonlinear model that operates not on the visual stimulus, but on the afferent responses of a population of nonlinear V1 cells. We fit this cascade model to responses of individual {MT} neurons and show that it robustly predicts the separately measured responses to gratings and plaids. The model captures the full range of pattern motion selectivity found in {MT}. Cells that signal pattern motion are distinguished by having convergent excitatory input from V1 cells with a wide range of preferred directions, strong motion opponent suppression and a tuned normalization that may reflect suppressive input from the surround of V1 cells.},
    address = {Howard Hughes Medical Institute, New York University, New York, New York 10003, USA. rust@mit.edu},
    author = {Rust, Nicole C. and Mante, Valerio and Simoncelli, Eero P. and Movshon, J. Anthony},
    comment = {* Uses a whole set of data in MT neurons to fit different configurations of plaids; provides useful numbers for estimating input selectivity, integration width, and divisive normalisation for different cell types (from component to pattern cells). * "To test this model, we developed a new technique that allowed us to fit the cascaded L-N model to data from individual MT cells. We show that the model captures the variety" * See Rust06supp.},
    date-added = {2011-07-04 14:38:14},
    doi = {10.1038/nn1786},
    journal = {Nature Neuroscience},
    keywords = {khoei12jpp, motion, motion-perception, perception, perrinet12pred, sanz12jnp},
    number = {11},
    pages = {1421--1431},
    priority = {2},
    title = {How {MT} cells analyze the motion of visual patterns},
    url = {http://dx.doi.org/10.1038/nn1786},
    volume = {9},
    year = {2006}
}

@article{Newsome1997Deciding,
    author = {Newsome, W. T.},
    date-added = {2011-07-04 14:36:11},
    day = {8},
    doi = {10.1007/s003590050087},
    issn = {0340-7594},
    journal = {J. Comp. Physiol. A. Neuroethol. Sens. Neural Behav. Physiol.},
    keywords = {sanz12jnp},
    month = jun,
    number = {1},
    pages = {5--12},
    priority = {3},
    publisher = {Springer Berlin / Heidelberg},
    title = {Deciding about motion: linking perception to action},
    url = {http://dx.doi.org/10.1007/s003590050087},
    volume = {181},
    year = {1997}
}

@article{Braddick1993Segmentation,
    abstract = {Reliable motion perception requires processes that integrate visual motion signals from neighbouring locations in the visual field, which should have the effect of smoothing out spatial variations in velocity. However, we also require motion processing to be very sensitive to local velocity differences, so that moving objects appear sharply distinct from their background and specific differential properties of optic flow associated with the observer's motion can be detected. Perceptual experiments give evidence both for integrative processes, which lead to spreading of perceived motion, and for differential processes, which lead to motion contrast and segmentation. Current and future experiments might allow tests of theoretical schemes that employ adaptive networks and/or multiple representations in order to reconcile the conflicting demands of integration and segmentation.},
    author = {Braddick, Oliver},
    date-added = {2011-07-04 14:34:30},
    doi = {10.1016/0166-2236(93)90179-P},
    issn = {01662236},
    journal = {Trends Neurosci.},
    keywords = {sanz12jnp},
    month = jul,
    number = {7},
    pages = {263--268},
    priority = {2},
    title = {Segmentation versus integration in visual motion processing},
    url = {http://dx.doi.org/10.1016/0166-2236(93)90179-P},
    volume = {16},
    year = {1993}
}

@article{Felsen2005Natural,
    abstract = {An ultimate goal of systems neuroscience is to understand how sensory stimuli encountered in the natural environment are processed by neural circuits. Achieving this goal requires knowledge of both the characteristics of natural stimuli and the response properties of sensory neurons under natural stimulation. Most of our current notions of sensory processing have come from experiments using simple, parametric stimulus sets. However, a growing number of researchers have begun to question whether this approach alone is sufficient for understanding the real-life sensory tasks performed by the organism. Here, focusing on the early visual pathway, we argue that the use of natural stimuli is vital for advancing our understanding of sensory processing.},
    author = {Felsen, Gidon and Dan, Yang},
    date-added = {2011-07-04 14:30:06},
    day = {23},
    doi = {10.1038/nn1608},
    issn = {1097-6256},
    journal = {Nat. Neurosci.},
    keywords = {sanz12jnp},
    month = nov,
    number = {12},
    pages = {1643--1646},
    pmid = {16306891},
    priority = {3},
    publisher = {Nature Publishing Group},
    title = {A natural approach to studying vision},
    url = {http://dx.doi.org/10.1038/nn1608},
    volume = {8},
    year = {2005}
}

@article{Field99,
    abstract = {The processing of spatial information by the visual system shows a number of similarities to the wavelet transforms that have become popular in applied mathematics. Over the last decade, a range of studies have focused on the question of 'why' the visual system would evolve this strategy of coding spatial information. One such approach has focused on the relationship between the visual code and the statistics of natural scenes under the assumption that the visual system has evolved this strategy as a means of optimizing the representation of its visual environment. This paper reviews some of this literature and looks at some of the statistical properties of natural scenes that allow this code to be efficient. It is argued that such wavelet codes are efficient because they increase the independence of the vectors' outputs (i.e. they increase the independence of the responses of the visual neurons) by finding the sparse structure available in the input. Studies with neural networks that attempt to maximize the 'sparsity' of the representation have been shown to produce vectors (neural receptive fields) that have many of the properties of a wavelet representation. It is argued that the visual environment has the appropriate sparse structure to make this sparse output possible. It is argued that these sparse/independent representations make it computationally easier to detect and represent the higher--order structure present in complex environmental data.},
    author = {Field, D. J.},
    date-added = {2011-07-04 14:28:53},
    day = {1},
    doi = {10.1098/rsta.1999.0446},
    journal = {Philos T Roy Soc A},
    keywords = {sanz12jnp},
    month = sep,
    number = {1760},
    pages = {2527--2542},
    priority = {3},
    title = {Wavelets, vision and the statistics of natural scenes},
    url = {http://dx.doi.org/10.1098/rsta.1999.0446},
    volume = {357},
    year = {1999}
}

@article{Henriksson08,
    abstract = {Human medial occipital cortex comprises multiple visual areas, each with a distinct retinotopic representation of visual environment. We measured spatial frequency ({SF}) tuning curves with functional magnetic resonance imaging ({fMRI}) and found consistent differences between these areas. Areas V1, V2, {VP}, V3, V4v, and {V3A} were all band-pass tuned, with progressively lower {SF} optima in V1, V2, and {V3A}. In {VP} and V3, the {SF} optima were similar to optima in V2, whereas V4v showed more individual variation and scattered {SF} representations on the cortical surface. Area V5+ showed low-pass {SF} tuning. In each area, the {SF} optimum declined with increasing eccentricity. After accounting for the cortical magnification, the cortical extent of the optimal spatial wavelengths was approximately constant across eccentricity in V1, which suggests an anatomical constraint for the optimal {SF}, and this extent is actually comparable to the extent of horizontal connections within primate V1. The optimal spatial wavelengths in the visual field are also of similar extent to the spatial summation fields of macaque V1. The progressive decline in the {SF} tuning from V1 to V2 and {V3A} is compatible with the view that these areas represent visual information at different spatial scales.},
    author = {Henriksson, Linda and Nurminen, Lauri and Hyv{\"{a}}rinen, Aapo and Vanni, Simo},
    date-added = {2011-07-04 14:27:52},
    day = {1},
    doi = {10.1167/8.10.5},
    journal = {J. Vis.},
    keywords = {sanz12jnp},
    month = aug,
    number = {10},
    pmid = {19146347},
    priority = {2},
    title = {Spatial frequency tuning in human retinotopic visual areas},
    url = {http://dx.doi.org/10.1167/8.10.5},
    volume = {8},
    year = {2008}
}

@article{Singh2000Spatiotemporal,
    abstract = {Using functional magnetic resonance imaging ({fMRI}) we have studied the variation in response magnitude, in each visual area ({V1-V5}), as a function of spatial frequency ({SF}), temporal frequency ({TF}) and unidirectional motion versus counterphase flicker. Each visual area was identified in each subject using a combination of retinotopic mapping {fMRI} and cortical flattening techniques. A drifting (or counterphasing) sinusoidal grating was used as the stimulus in a study in which we parametrically varied {SF} between 0.4 and 7 cycles/degree and {TF} between 0 and 18 Hz. For each experiment we constructed {fMRI} amplitude tuning curves, averaged across subjects, for each visual area. The tuning curves that resulted are consistent with the known physiological properties of cells in the corresponding macaque visual areas, previous functional imaging studies, and in the case of V1, the psychophysically determined contrast sensitivity functions for spatial and temporal frequency. In the case of {V3A}, the {SF} tuning functions obtained were more similar to those found in single cell studies of macaque V3 rather than macaque {V3A}. All areas showed at least a moderate preference for directed versus counterphasing motion with V5 showing the largest preference. Visual areas V1, V2, V3, and {V3A} showed more direction sensitivity at low spatial frequencies, while {VP}, V4, and V5 had the highest drifting versus counterphasing ratios for higher spatial frequencies.},
    author = {Singh, K. D. and Smith, A. T. and Greenlee, M. W.},
    date-added = {2011-07-04 14:26:27},
    doi = {10.1006/nimg.2000.0642},
    issn = {1053-8119},
    journal = {NeuroImage},
    keywords = {sanz12jnp},
    month = nov,
    number = {5},
    pages = {550--564},
    pmid = {11034862},
    priority = {2},
    title = {Spatiotemporal frequency and direction sensitivities of human visual areas measured using {fMRI}.},
    url = {http://dx.doi.org/10.1006/nimg.2000.0642},
    volume = {12},
    year = {2000}
}

@article{Shmuel96,
    abstract = {The goal of this study was to explore the functional organization of direction of motion in cat area 18. Optical imaging was used to record the activity of populations of neurons. We found a patchy distribution of cortical regions exhibiting preference for one direction over the opposite direction of motion. The degree of clustering according to preference of direction was two to four times smaller than that observed for orientation. In general, direction preference changed smoothly along the cortical surface; however, discontinuities in the direction maps were observed. These discontinuities formed lines that separated pairs of patches with preference for opposite directions. The functional maps for direction and for orientation preference were closely related; typically, an iso-orientation patch was divided into regions that exhibited preference for opposite directions, orthogonal to the orientation. In addition, the lines of discontinuity within the direction map often connected points of singularity in the orientation map. Although the organization of both domains was related, the direction and the orientation selective responses were separable; whereas the selective response according to direction of motion was nearly independent of the length of bars used for visual stimulation, the selective response to orientation decreased significantly with decreasing length of the bars. Extensive single and multiunit electrical recordings, targeted to selected domains of the functional maps, confirmed the features revealed by optical imaging. We conclude that significant processing of direction of motion is performed early in the cat visual pathway.},
    author = {Shmuel, Amir and Grinvald, Amiram},
    date-added = {2011-07-04 14:25:14},
    day = {1},
    journal = {J. Neurosci.},
    keywords = {sanz12jnp},
    month = nov,
    number = {21},
    pages = {6945--6964},
    pmid = {8824332},
    priority = {3},
    title = {Functional Organization for Direction of Motion and Its Relationship to Orientation Maps in Cat Area 18},
    url = {http://www.jneurosci.org/content/16/21/6945.abstract},
    volume = {16},
    year = {1996}
}

@article{Everson1998Representation,
    abstract = {Knowledge of the response of the primary visual cortex to the various spatial frequencies and orientations in the visual scene should help us understand the principles by which the brain recognizes patterns. Current information about the cortical layout of spatial frequency response is still incomplete because of difficulties in recording and interpreting adequate data. Here, we report results from a study of the cat primary visual cortex in which we employed a new image-analysis method that allows improved separation of signal from noise and that we used to examine the neurooptical response of the primary visual cortex to drifting sine gratings over a range of orientations and spatial frequencies. We found that (i) the optical responses to all orientations and spatial frequencies were well approximated by weighted sums of only two pairs of basis pictures, one pair for orientation and a different pair for spatial frequency; (ii) the weightings of the two pictures in each pair were approximately in quadrature (1/4 cycle apart); and (iii) our spatial frequency data revealed a cortical map that continuously assigns different optimal spatial frequency responses to different cortical locations over the entire spatial frequency range.},
    author = {Everson, R. M. and Prashanth, A. K. and Gabbay, M. and Knight, B. W. and Sirovich, L. and Kaplan, E.},
    date-added = {2011-07-04 14:21:22},
    day = {7},
    doi = {10.1073/pnas.95.14.8334},
    issn = {0027-8424},
    journal = {Proc. Natl. Acad. Sci. U. S. A.},
    keywords = {sanz12jnp},
    month = jul,
    number = {14},
    pages = {8334--8338},
    pmcid = {PMC20976},
    pmid = {9653187},
    priority = {2},
    title = {Representation of spatial frequency and orientation in the visual cortex.},
    url = {http://dx.doi.org/10.1073/pnas.95.14.8334},
    volume = {95},
    year = {1998}
}

@article{Watson83,
    author = {Watson, Andrew B. and Barlow, H. B. and Robson, John G.},
    date-added = {2011-07-04 14:19:49},
    day = {31},
    doi = {10.1038/302419a0},
    journal = {Nature},
    keywords = {sanz12jnp},
    month = mar,
    number = {5907},
    pages = {419--422},
    priority = {2},
    publisher = {Nature Publishing Group},
    title = {What does the eye see best?},
    url = {http://dx.doi.org/10.1038/302419a0},
    volume = {302},
    year = {1983}
}

@article{Graham79,
    abstract = {The decomposition of a complex auditory sound into its constituent simple harmonic variations (pure tones) is an example of Fourier analysis. Does the brain do something like this to visual scenes, decomposing a visual pattern into some simpler representation to help ease the information-processing load? As Norma Graham explains below, the answer to this question is both yes and no. But, yes or no, the idea that the brain might do a Fourier analysis of the visual scene has been a powerful impetus to much exciting visual research in the last decade.},
    author = {Graham, Norma},
    citeulike-article-id = {9503494},
    citeulike-linkout-0 = {http://dx.doi.org/10.1016/0166-2236(79)90082-1},
    date-added = {2011-07-04 14:18:37},
    doi = {10.1016/0166-2236(79)90082-1},
    issn = {01662236},
    journal = {Trends Neurosci.},
    keywords = {sanz12jnp},
    month = jan,
    pages = {207--208},
    priority = {2},
    title = {Does the brain perform a Fourier analysis of the visual scene?},
    url = {http://dx.doi.org/10.1016/0166-2236(79)90082-1},
    volume = {2},
    year = {1979}
}

@article{Rust05a,
    abstract = {Neurons in primary visual cortex (V1) are commonly classified as simple or complex based upon their sensitivity to the sign of stimulus contrast. The responses of both cell types can be described by a general model in which the outputs of a set of linear filters are nonlinearly combined. We estimated the model for a population of V1 neurons by analyzing the mean and covariance of the spatiotemporal distribution of random bar stimuli that were associated with spikes. This analysis reveals an unsuspected richness of neuronal computation within V1. Specifically, simple and complex cell responses are best described using more linear filters than the one or two found in standard models. Many filters revealed by the model contribute suppressive signals that appear to have a predominantly divisive influence on neuronal firing. Suppressive signals are especially potent in direction-selective cells, where they reduce responses to stimuli moving in the nonpreferred direction.},
    address = {Center for Neural Science and New York University, New York, New York 10003.},
    author = {Rust, Nicole C. and Schwartz, Odelia and Movshon, J. Anthony and Simoncelli, Eero P.},
    citeulike-article-id = {230054},
    citeulike-linkout-0 = {http://dx.doi.org/10.1016/j.neuron.2005.05.021},
    citeulike-linkout-1 = {http://view.ncbi.nlm.nih.gov/pubmed/15953422},
    citeulike-linkout-2 = {http://www.hubmed.org/display.cgi?uids=15953422},
    date-added = {2011-07-04 14:15:33},
    day = {16},
    doi = {10.1016/j.neuron.2005.05.021},
    issn = {0896-6273},
    journal = {Neuron},
    keywords = {area-v1, sanz12jnp},
    month = jun,
    number = {6},
    pages = {945--956},
    pmid = {15953422},
    priority = {3},
    title = {Spatiotemporal elements of macaque {V1} receptive fields.},
    url = {http://dx.doi.org/10.1016/j.neuron.2005.05.021},
    volume = {46},
    year = {2005}
}

@article{Enroth-Cugell66,
    author = {Enroth-Cugell, C. and Robson, J. G.},
    citeulike-article-id = {9502426},
    date-added = {2011-07-04 11:21:48},
    journal = {J. Physiol.},
    keywords = {sanz12jnp},
    number = {3},
    pages = {517--552},
    priority = {2},
    title = {The {C}ontrast {S}ensitivity of {R}etinal {G}anglion {C}ells of the {C}at.},
    volume = {187},
    year = {1966}
}

@article{Schwartz01,
    author = {Schwartz, Odelia and Simoncelli, Eero P.},
    citeulike-article-id = {9502425},
    date-added = {2011-07-04 11:21:48},
    journal = {Nature Neuroscience},
    keywords = {sanz12jnp},
    number = {8},
    pages = {819--25},
    priority = {0},
    title = {Natural signal statistics and sensory gain control},
    volume = {4},
    year = {2001}
}

@article{Turing52,
    author = {Turing, A.},
    citeulike-article-id = {9502424},
    date-added = {2011-07-04 11:21:48},
    journal = {Philos. Trans. Roy. Soc. London Ser. B},
    keywords = {sanz12jnp},
    pages = {37--72},
    priority = {2},
    title = {The chemical basis of morphogenesis},
    volume = {237},
    year = {1952}
}

@article{Drewes08,
    abstract = {It is commonly assumed that the visual system is optimized to process naturalistic inputs for both low and high level processing. Here we search for an advantageous effect of natural scene statistics when estimating speed. Ocular following responses ({OFRs}) are reflexive eye movements known to reflect many properties of low-level motion processing. Using the scleral search coil technique, we recorded human {OFRs} to drifting sinusoidal gratings ({1D}) as well as narrow bandpass noise images ({2D}). For sinusoidal gratings, it was previously shown that {OFRs} are best elicited with low spatial frequency stimuli (<1 cpd) moving at optimal speed (20--40°/s). We were able to confirm this for {2D} noise stimuli as well. However, we found a systematic difference in the acceleration profiles: {OFRs} to {2D} noise stimuli consistently showed longer latencies, yet stronger overall responses than the {1D} gratings. When combining two or more spatial frequencies, we found a gain in response strength mostly in the higher spatial frequency range. Also, we found evidence that stimuli consisting of the normalized sum of several spatial frequencies can create stronger {OFRs} than the normalized sum of the {OFRs} to the individual frequencies. When combining spatial frequencies, the weighting (mix ratio) of the individual frequencies influences the response gain. The optimum weighting with multiple bandpass noises appeared to be similar to the spectral shape of natural scenes (1/f). These results show a systematic difference between {OFRs} evoked by {1D} gratings and {2D} noises, and provide a first behavioral evidence that speed is best estimated by combining information across different channels, with weighting based on natural scene statistics.},
    author = {Drewes, Jan and Barthelemy, Frederic and Masson, Guillaume S.},
    citeulike-article-id = {9473860},
    citeulike-linkout-0 = {http://dx.doi.org/10.1167/8.6.383},
    citeulike-linkout-1 = {http://www.journalofvision.org/content/8/6/383.abstract},
    citeulike-linkout-2 = {http://www.journalofvision.org/content/8/6/383.full.pdf},
    date-added = {2011-06-29 15:10:23},
    day = {10},
    doi = {10.1167/8.6.383},
    journal = {Journal of Vision},
    keywords = {motion\_clouds, natural\_scenes, sanz12jnp},
    month = may,
    number = {6},
    pages = {383},
    priority = {2},
    title = {Human ocular following and natural scene statistics},
    url = {http://dx.doi.org/10.1167/8.6.383},
    volume = {8},
    year = {2008}
}

@article{Klein00,
    abstract = {The spectrotemporal receptive field ({STRF}) is a functional descriptor of the linear processing of time-varying acoustic spectra by the auditory system. By cross-correlating sustained neuronal activity with the dynamic spectrum of a spectrotemporally rich stimulus ensemble, one obtains an estimate of the {STRF}. In this article, the relationship between the spectrotemporal structure of any given stimulus and the quality of the {STRF} estimate is explored and exploited. Invoking the Fourier theorem, arbitrary dynamic spectra are described as sums of basic sinusoidal components---that is, moving ripples. Accurate estimation is found to be especially reliant on the prominence of components whose spectral and temporal characteristics are of relevance to the auditory locus under study and is sensitive to the phase relationships between components with identical temporal signatures. These and other observations have guided the development and use of stimuli with deterministic dynamic spectra composed of the superposition of many temporally orthogonal moving ripples having a restricted, relevant range of spectral scales and temporal rates. The method, termed sum-of-ripples, is similar in spirit to the white-noise approach but enjoys the same practical advantages---which equate to faster and more accurate estimation---attributable to the time-domain sum-of-sinusoids method previously employed in vision research. Application of the method is exemplified with both modeled data and experimental data from ferret primary auditory cortex ({AI}).},
    author = {Klein, D. J. and Depireux, D. A. and Simon, J. Z. and Shamma, S. A.},
    citeulike-article-id = {9448283},
    citeulike-linkout-0 = {http://www.isr.umd.edu/CAAR/papers/jcns.pdf},
    citeulike-linkout-1 = {http://www.isr.umd.edu/CAAR/papers/jcns.pdf },
    citeulike-linkout-2 = {http://dx.doi.org/10.1023/A:1008990412183},
    citeulike-linkout-3 = {http://www.springerlink.com/content/n1267xl623712325},
    comment = {http://www.isr.umd.edu/CAAR/papers/jcns.pdf This paper first provides a clear overview of the various methods for characterizing the input--output transformation of auditory neurons. It convincingly advocated and thoroughly developed the use of 'sum-of-ripples', a type of stimuli consisting of the superposition of multiple Fourier components in the spectrotemporal domain},
    date-added = {2011-06-22 12:24:55},
    day = {1},
    doi = {10.1023/A:1008990412183},
    issn = {09295313},
    journal = {J. Comput. Neurosci.},
    keywords = {motion\_clouds, sanz12jnp},
    month = jul,
    number = {1},
    pages = {85--111},
    priority = {2},
    publisher = {Springer Netherlands},
    title = {Robust Spectrotemporal Reverse Correlation for the Auditory System: Optimizing Stimulus Design},
    url = {http://www.isr.umd.edu/CAAR/papers/jcns.pdf},
    volume = {9},
    year = {2000}
}

@article{Lee96,
    author = {Lee, Tai S.},
    citeulike-article-id = {9447894},
    date-added = {2011-06-22 10:47:35},
    journal = {IEEE T. Pattern. Anal.},
    keywords = {gabor, sanz12jnp},
    pages = {959--971},
    priority = {0},
    title = {Image Representation Using {2D} Gabor Wavelets},
    volume = {18},
    year = {1996}
}

@article{Marcelja80,
    abstract = {On the basis of measured receptive field profiles and spatial frequency tuning characteristics of simple cortical cells, it can be concluded that the representation of an image in the visual cortex must involve both spatial and spatial frequency variables. In a scheme due to Gabor, an image is represented in terms of localized symmetrical and antisymmetrical elementary signals. Both measured receptive fields and measured spatial frequency tuning curves conform closely to the functional form of Gabor elementary signals. It is argued that the visual cortex representation corresponds closely to the Gabor scheme owing to its advantages in treating the subsequent problem of pattern recognition.},
    author = {Marcelja, S.},
    citeulike-article-id = {9447893},
    citeulike-linkout-0 = {http://dx.doi.org/10.1364/JOSA.70.001297},
    date-added = {2011-06-22 10:47:35},
    day = {1},
    doi = {10.1364/JOSA.70.001297},
    journal = {J. Opt. Soc. Am.},
    keywords = {gabor, sanz12jnp, simple},
    month = nov,
    number = {11},
    pages = {1297--1300},
    priority = {2},
    publisher = {OSA},
    title = {Mathematical description of the responses of simple cortical cells},
    url = {http://dx.doi.org/10.1364/JOSA.70.001297},
    volume = {70},
    year = {1980}
}

@article{Daugman88,
    author = {Daugman, John G.},
    citeulike-article-id = {9447892},
    citeulike-linkout-0 = {http://citeseer.nj.nec.com/context/16741/0},
    date-added = {2011-06-22 10:47:35},
    journal = {IEEE T. Acoust. Speech.},
    keywords = {area-v1, bibtex-import, gabor, sanz12jnp},
    number = {7},
    pages = {1169--1179},
    priority = {0},
    title = {Complete discrete {2D} Gabor transform by neural networks for image analysis and compression},
    url = {http://citeseer.nj.nec.com/context/16741/0},
    volume = {36},
    year = {1988}
}

@article{Daugman80,
    abstract = {Most vision research embracing the spatial frequency paradigm has been conceptually and mathematically a one-dimensional analysis of two-dimensional mechanisms. Spatial vision models and the experiments sustaining them have generally treated spatial frequency as a one-dimensional variable, even though receptive fields and retinal images are two-dimensional and linear transform theory obliges any frequency analysis to preserve dimension. Four models of cortical receptive fields are introduced and studied here in {2D} form, in order to illustrate the relationship between their excitatory/inhibitory spatial structure and their resulting {2D} spectral properties. It emerges that only a very special analytic class of receptive fields possess independent tuning functions for spatial frequency and orientation; namely, those profiles whose two-dimensional Fourier Transforms are expressible as the separable product of a radial function and an angular function. Furthermore, only such receptive fields would have the same orientation tuning curve for single bars as for gratings. All classes lacking this property would describe cells responsive to different orientations for different spatial frequencies and vice versa; this is shown to be the case, for example, for the Hubel \& Wiesel model of cortical orientation-tuned simple cells receiving inputs from an aligned row of center/surround {LGN} cells. When these results are considered in conjunction with psychophysical evidence for nonseparability of spatial frequency and orientation tuning properties within a ``channel'', it becomes mandatory that future spatial vision research of the Fourier genre take on an explicitly two-dimensional character.},
    author = {Daugman, John G.},
    citeulike-article-id = {9447891},
    citeulike-linkout-0 = {http://dx.doi.org/10.1016/0042-6989(80)90065-6},
    date-added = {2011-06-22 10:47:35},
    doi = {10.1016/0042-6989(80)90065-6},
    journal = {Vision Res.},
    keywords = {gabor, sanz12jnp, vision},
    number = {10},
    pages = {847--856},
    priority = {2},
    title = {Two-dimensional spectral analysis of cortical receptive field profiles},
    url = {http://dx.doi.org/10.1016/0042-6989(80)90065-6},
    volume = {20},
    year = {1980}
}

@article{Jones87,
    abstract = {1. Using the two-dimensional ({2D}) spatial and spectral response profiles described in the previous two reports, we test Daugman's generalization of Marcelja's hypothesis that simple receptive fields belong to a class of linear spatial filters analogous to those described by Gabor and referred to here as {2D} Gabor filters. 2. In the space domain, we found {2D} Gabor filters that fit the {2D} spatial response profile of each simple cell in the least-squared error sense (with a simplex algorithm), and we show that the residual error is devoid of spatial structure and statistically indistinguishable from random error. 3. Although a rigorous statistical approach was not possible with our spectral data, we also found a Gabor function that fit the {2D} spectral response profile of each simple cell and observed that the residual errors are everywhere small and unstructured. 4. As an assay of spatial linearity in two dimensions, on which the applicability of Gabor theory is dependent, we compare the filter parameters estimated from the independent {2D} spatial and spectral measurements described above. Estimates of most parameters from the two domains are highly correlated, indicating that assumptions about spatial linearity are valid. 5. Finally, we show that the functional form of the {2D} Gabor filter provides a concise mathematical expression, which incorporates the important spatial characteristics of simple receptive fields demonstrated in the previous two reports. Prominent here are 1) Cartesian separable spatial response profiles, 2) spatial receptive fields with staggered subregion placement, 3) Cartesian separable spectral response profiles, 4) spectral response profiles with axes of symmetry not including the origin, and 5) the uniform distribution of spatial phase angles. 6. We conclude that the Gabor function provides a useful and reasonably accurate description of most spatial aspects of simple receptive fields. Thus it seems that an optimal strategy has evolved for sampling images simultaneously in the {2D} spatial and spatial frequency domains.},
    author = {Jones, J. P. and Palmer, L. A.},
    citeulike-article-id = {9447890},
    citeulike-linkout-0 = {http://jn.physiology.org/content/58/6/1233.abstract},
    date-added = {2011-06-22 10:47:35},
    eprint = {http://jn.physiology.org/content/58/6/1233.full.pdf+html},
    journal = {J. Neurophysiol.},
    keywords = {gabor, sanz12jnp},
    number = {6},
    pages = {1233--1258},
    priority = {0},
    title = {An evaluation of the two-dimensional Gabor filter model of simple receptive fields in cat striate cortex},
    url = {http://jn.physiology.org/content/58/6/1233.abstract},
    volume = {58},
    year = {1987}
}

@article{Oliva01,
    author = {Oliva, Aude and Torralba, A.},
    citeulike-article-id = {9447889},
    citeulike-linkout-0 = {http://dx.doi.org/10.1023/A:1011139631724},
    date-added = {2011-06-22 10:47:34},
    doi = {10.1023/A:1011139631724},
    journal = {Int. J. Comput. Vision},
    keywords = {assofield, images, natural, natural\_scenes, sanz12jnp, statisticsfourier},
    pages = {145--175},
    priority = {0},
    title = {Modeling the Shape of the Scene: A Holistic Representation of the Spatial Envelope},
    url = {http://dx.doi.org/10.1023/A:1011139631724},
    volume = {42},
    year = {2001}
}

@incollection{Smith10,
    abstract = {Visual motion is an essential piece of information for both perceiving our environment and controlling our actions. The visual motion system has evolved as an exquisite machinery adapted to reconstruct the direction and speed of the object of interest within a few dozen milliseconds. In the last decade, tremendous progress has been made in the understanding of how the outputs of local motion detectors are integrated. In particular, its dynamics are now unveiled at neuronal and behavioral levels. Solutions for fundamental computational problems such as the aperture problem and the interplay between motion segmentation and integration have been proposed from these works and biologically-realistic simulations are been proposed. Such a multi-level approach is rooted in the fact that dynamics of these solutions can be tackled at different scales using similar tasks and stimuli. Dynamics of Visual Motion Processing offers an overview of recent work on the dynamics of motion integration with inter-related examples taken from physiology (both single-neuron and population activity) and psychophysics as well as sensorimotor control or active vision. The last section presents three different approaches for understanding and modeling motion perception of natural scenes, complex {3D} layout, and biological motion.},
    address = {Berlin-Heidelberg},
    author = {Smith, Matthew A. and Majaj, Najib and Movshon, J. Anthony},
    booktitle = {Dynamics of Visual Motion Processing: Neuronal, Behavioral and Computational Approaches},
    citeulike-article-id = {9447888},
    date-added = {2011-06-22 10:47:34},
    edition = {First},
    editor = {Masson, Guillaume S. and Ilg, Uwe J.},
    keywords = {brain, computational, cortical, dynamics, integration, models, motion, neuronal, perrinet12pred, processing, sanz12jnp, selectivity},
    pages = {55--72},
    priority = {0},
    publisher = {Springer},
    title = {Dynamics of Pattern Motion Computation},
    year = {2010}
}

@incollection{Movshon85,
    author = {Movshon, J. Anthony and Adelson, Edward H. and Gizzi, Martin S. and Newsome, William T.},
    booktitle = {Pattern Recognition Mechanisms},
    citeulike-article-id = {9447887},
    date-added = {2011-06-22 10:47:34},
    editor = {Chagas, C. and Gattass, R. and Gross, C.},
    keywords = {khoei12jpp, motion, perrinet12pred, sanz12jnp},
    pages = {117--151},
    priority = {0},
    publisher = {Rome: Vatican Press},
    title = {The analysis of moving visual patterns},
    volume = {54},
    year = {1985}
}

@article{Weiss02,
    author = {Weiss, Yair and Simoncelli, Eero P. and Adelson, Edward H.},
    citeulike-article-id = {9447885},
    citeulike-linkout-0 = {http://dx.doi.org/10.1038/nn858},
    date-added = {2011-06-22 10:47:33},
    day = {20},
    doi = {10.1038/nn858},
    issn = {1097-6256},
    journal = {Nature Neuroscience},
    keywords = {bayesian, bayesian-models, khoei12jpp, model, motion, perrinet12pred, sanz12jnp},
    month = jun,
    number = {6},
    pages = {598--604},
    priority = {0},
    title = {Motion illusions as optimal percepts},
    url = {http://dx.doi.org/10.1038/nn858},
    volume = {5},
    year = {2002}
}

@article{Mainen95,
    abstract = {It is not known whether the variability of neural activity in the cerebral cortex carries information or reflects noisy underlying mechanisms. In an examination of the reliability of spike generation using recordings from neurons in rat neocortical slices, the precision of spike timing was found to depend on stimulus transients. Constant stimuli led to imprecise spike trains, whereas stimuli with fluctuations resembling synaptic activity produced spike trains with timing reproducible to less than 1 millisecond. These data suggest a low intrinsic noise level in spike generation, which could allow cortical neurons to accurately transform synaptic input into spike sequences, supporting a possible role for spike timing in the processing of cortical information by the neocortex.},
    author = {Mainen, Z. F. and Sejnowski, T. J.},
    citeulike-article-id = {9447883},
    citeulike-linkout-0 = {http://dx.doi.org/10.1126/science.7770778},
    citeulike-linkout-1 = {http://www.sciencemag.org/cgi/content/abstract/268/5216/1503},
    date-added = {2011-06-22 10:47:32},
    doi = {10.1126/science.7770778},
    journal = {Science},
    keywords = {frozen, noise, reliability, sanz12jnp, variability},
    number = {5216},
    pages = {1503--1506},
    priority = {0},
    title = {Reliability of spike timing in neocortical neurons},
    url = {http://www.sciencemag.org/cgi/content/abstract/268/5216/1503},
    volume = {268},
    year = {1995}
}

@article{Bex02,
    author = {Bex, P. J. and Makous, W.},
    citeulike-article-id = {9447882},
    date-added = {2011-06-22 10:47:32},
    journal = {J. Opt. Soc. Am. A},
    keywords = {contrast, images, natural, natural\_scenes, sanz12jnp},
    month = jun,
    number = {6},
    pages = {1096--1106},
    priority = {0},
    title = {Spatial frequency, phase, and the contrast of natural images},
    volume = {19},
    year = {2002}
}

@article{Atick92,
    author = {Atick, J. J.},
    citeulike-article-id = {9447881},
    date-added = {2011-06-22 10:47:32},
    journal = {Network},
    keywords = {natural\_scenes, sanz12jnp},
    number = {2},
    pages = {213--52},
    priority = {0},
    title = {Could information theory provide an ecological theory of sensory processing?},
    volume = {3},
    year = {1992}
}

@misc{Scipy-ifftn,
    author = {SciPy},
    citeulike-article-id = {9447880},
    citeulike-linkout-0 = {http://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.ifftn.html\#numpy.fft.ifftn},
    date-added = {2011-06-22 10:47:32},
    keywords = {fourier, python, sanz12jnp},
    month = jun,
    priority = {0},
    title = {{SciPy} Documentation},
    url = {http://docs.scipy.org/doc/numpy/reference/generated/numpy.fft.ifftn.html\#numpy.fft.ifftn},
    year = {2011}
}

@misc{neurotools,
    author = {Bruederle, Daniel and Davison, Andrew and Yger, Pierre},
    citeulike-article-id = {9447879},
    citeulike-linkout-0 = {http://neuralensemble.org/trac/NeuroTools},
    date-added = {2011-06-22 10:47:31},
    keywords = {neuralensamble, python, sanz12jnp, texture, tools},
    month = jun,
    priority = {0},
    title = {{NeuroTools}},
    url = {http://neuralensemble.org/trac/NeuroTools},
    year = {2011}
}

@article{Zeki83,
    author = {Zeki, S.},
    citeulike-article-id = {9447878},
    comment = {Elsevier IBRO},
    date-added = {2011-06-22 10:47:31},
    journal = {Neuroscience},
    keywords = {representation, sanz12jnp},
    number = {4},
    pages = {741--765},
    priority = {0},
    publisher = {Elsevier},
    title = {Colour coding in the cerebral cortex: the reaction of cells in monkey visual cortex to wavelengths and colours},
    volume = {9},
    year = {1983}
}

@article{Solomon10,
    author = {Solomon, S. G. and Tailby, C. and Cheong, S. K. and Camp, A. J.},
    citeulike-article-id = {9447877},
    date-added = {2011-06-22 10:47:31},
    journal = {J. Neurophysiol.},
    keywords = {representation, sanz12jnp},
    number = {4},
    pages = {1884},
    priority = {0},
    publisher = {Am Physiological Soc},
    title = {Linear and Nonlinear Contributions to the Visual Sensitivity of Neurons in Primate Lateral Geniculate Nucleus},
    volume = {104},
    year = {2010}
}

@book{Masson10,
    abstract = {Visual motion is an essential piece of information for both perceiving our environment and controlling our actions. The visual motion system has evolved as an exquisite machinery adapted to reconstruct the direction and speed of the object of interest within a few dozen milliseconds. In the last decade, tremendous progress has been made in the understanding of how the outputs of local motion detectors are integrated. In particular, its dynamics are now unveiled at neuronal and behavioral levels. Solutions for fundamental computational problems such as the aperture problem and the interplay between motion segmentation and integration have been proposed from these works and biologically-realistic simulations are been proposed. Such a multi-level approach is rooted in the fact that dynamics of these solutions can be tackled at different scales using similar tasks and stimuli. Dynamics of Visual Motion Processing offers an overview of recent work on the dynamics of motion integration with inter-related examples taken from physiology (both single-neuron and population activity) and psychophysics as well as sensorimotor control or active vision. The last section presents three different approaches for understanding and modeling motion perception of natural scenes, complex {3D} layout, and biological motion.},
    address = {Berlin-Heidelberg},
    booktitle = {Dynamics of Visual Motion Processing: Neuronal, Behavioral and Computational Approaches},
    citeulike-article-id = {9447876},
    date-added = {2011-06-22 10:47:30},
    edition = {First},
    editor = {Masson, Guillaume S. and Ilg, Uwe J.},
    keywords = {brain, computational, cortical, dynamics, integration, models, motion, neuronal, perrinet12pred, processing, sanz12jnp, selectivity},
    priority = {0},
    publisher = {Springer},
    title = {Dynamics of visual motion processing: neuronal, behavioral and computational approaches},
    year = {2010}
}

@article{Wu06,
    abstract = {System identification is a growing approach to sensory neurophysiology that facilitates the development of quantitative functional models of sensory processing. This approach provides a clear set of guidelines for combining experimental data with other knowledge about sensory function to obtain a description that optimally predicts the way that neurons process sensory information. This prediction paradigm provides an objective method for evaluating and comparing computational models. In this chapter we review many of the system identification algorithms that have been used in sensory neurophysiology, and we show how they can be viewed as variants of a single statistical inference problem. We then review many of the practical issues that arise when applying these methods to neurophysiological experiments: stimulus selection, behavioral control, model visualization, and validation. Finally we discuss several problems to which system identification has been applied recently, including one important long-term goal of sensory neuroscience: developing models of sensory systems that accurately predict neuronal responses under completely natural conditions.},
    address = {University of California, Berkeley, California 94720; Institute for Systems Research, University of Maryland, College Park, Maryland 20742},
    author = {Wu, Michael C. K. and David, Stephen V. and Gallant, Jack L.},
    citeulike-article-id = {773657},
    citeulike-linkout-0 = {http://www.annualreviews.org/doi/abs/10.1146/annurev.neuro.29.051605.113024},
    citeulike-linkout-1 = {http://dx.doi.org/10.1146/annurev.neuro.29.051605.113024},
    citeulike-linkout-2 = {http://view.ncbi.nlm.nih.gov/pubmed/16776594},
    citeulike-linkout-3 = {http://www.hubmed.org/display.cgi?uids=16776594},
    date-added = {2011-06-22 10:40:39},
    doi = {10.1146/annurev.neuro.29.051605.113024},
    issn = {0147-006X},
    journal = {Annu. Rev. Neurosci.},
    keywords = {sanz12jnp, sensory},
    number = {1},
    pages = {477--505},
    pmid = {16776594},
    priority = {0},
    title = {Complete functional characterization of sensory neurons by system identification},
    url = {http://dx.doi.org/10.1146/annurev.neuro.29.051605.113024},
    volume = {29},
    year = {2006}
}

@article{Simoncelli01,
    author = {Simoncelli, Eero P. and Olshausen, Bruno A.},
    citeulike-article-id = {9447743},
    comment = {very good review on natural image statistics},
    date-added = {2011-06-22 09:16:48},
    journal = {Annu. Rev. Neurosci.},
    keywords = {motion\_clouds, sanz12jnp},
    pages = {1193--216},
    priority = {0},
    title = {Natural {I}mage {S}tatistics and {N}eural {R}epresentation},
    volume = {24},
    year = {2001}
}

@article{Lu01,
    abstract = {Lu and Sperling [Vision Res. 35, 2697 (1995)] proposed that human visual motion perception is served by three separate motion systems: a first-order system that responds to moving luminance patterns, a second-order system that responds to moving modulations of feature types-stimuli in which the expected luminance is the same everywhere but an area of higher contrast or of flicker moves, and a third-order system that computes the motion of marked locations in a "salience map," that is, a neural representation of visual space in which the locations of important visual features ("figure") are marked and "ground" is unmarked. Subsequently, there have been some strongly confirmatory reports: different gain-control mechanisms for first- and second-order motion, selective impairment of first- versus second- and/or third-order motion by different brain injuries, and the classification of new third-order motions, e.g., isoluminant chromatic motion. Various procedures have successfully discriminated between second- and third-order motion (when first-order motion is excluded): dual tasks, second-order reversed phi, motion competition, and selective adaptation. Meanwhile, eight apparent contradictions to the three-systems theory have been proposed. A review and reanalysis here of the new evidence, pro and con, resolves the challenges and yields a more clearly defined and significantly strengthened theory.},
    author = {Lu, Zhong-Lin and Sperling, George},
    citeulike-article-id = {9447742},
    date-added = {2011-06-22 09:16:48},
    journal = {J. Opt. Soc. Am. A},
    keywords = {motion\_clouds, perrinet12pred, sanz12jnp},
    month = sep,
    number = {9},
    pages = {2331--70},
    priority = {0},
    title = {Three-systems theory of human visual motion perception: review and update},
    volume = {18},
    year = {2001}
}

@article{Touryan01,
    abstract = {Most high-level sensory neurons have complex, nonlinear response properties; a comprehensive characterization of these properties remains a formidable challenge. Recent studies using complex sensory stimuli combined with linear and nonlinear analyses have provided new insights into the neuronal response properties in various sensory circuits.},
    author = {Touryan, J. and Dan, Y.},
    citeulike-article-id = {4314519},
    citeulike-linkout-0 = {https://www.incm.cnrs-mrs.fr/biblio\_database/Show?\_id=4bac\&\#38;sort=DEFAULT\&\#38;search=dan\&\#38;hits=13},
    citeulike-linkout-1 = {https://www.incm.cnrs-mrs.fr/biblio\_database/Show?\_id=4bac\&\#38;sort=DEFAULT\&\#38;search=dan\&\#38;hits=13},
    citeulike-linkout-2 = {http://dx.doi.org/10.1016/S0959-4388(00)00232-4},
    date-added = {2011-06-22 09:11:08},
    day = {1},
    doi = {10.1016/S0959-4388(00)00232-4},
    issn = {09594388},
    journal = {Curr Opin Neurobiol},
    keywords = {motion\_clouds, sanz12jnp, sensory},
    month = aug,
    number = {4},
    pages = {443--448},
    priority = {0},
    title = {Analysis of sensory coding with complex stimuli},
    url = {http://dx.doi.org/10.1016/S0959-4388(00)00232-4},
    volume = {11},
    year = {2001}
}

@article{Rust05,
    abstract = {The visual system evolved to process natural images, and the goal of visual neuroscience is to understand the computations it uses to do this. Indeed the goal of any theory of visual function is a model that will predict responses to any stimulus, including natural scenes. It has, however, recently become common to take this fundamental principle one step further: trying to use photographic or cinematographic representations of natural scenes (natural stimuli) as primary probes to explore visual computations. This approach is both challenging and controversial, and we argue that this use of natural images is so fraught with difficulty that it is not useful. Traditional methods for exploring visual computations that use artificial stimuli with carefully selected properties have been and continue to be the most effective tools for visual neuroscience. The proper use of natural stimuli is to test models based on responses to these synthetic stimuli, not to replace them.},
    author = {Rust, Nicole C. and Movshon, J. Anthony},
    citeulike-article-id = {406497},
    citeulike-linkout-0 = {http://dx.doi.org/10.1038/nn1606},
    citeulike-linkout-1 = {http://dx.doi.org/10.1038/nn1606},
    citeulike-linkout-2 = {http://view.ncbi.nlm.nih.gov/pubmed/16306892},
    citeulike-linkout-3 = {http://www.hubmed.org/display.cgi?uids=16306892},
    date-added = {2011-06-22 09:08:50},
    day = {23},
    doi = {10.1038/nn1606},
    issn = {1097-6256},
    journal = {Nat. Neurosci.},
    keywords = {motion\_clouds, natural\_scenes, sanz12jnp},
    month = nov,
    number = {12},
    pages = {1647--1650},
    pmid = {16306892},
    priority = {0},
    publisher = {Nature Publishing Group},
    title = {In praise of artifice},
    url = {http://dx.doi.org/10.1038/nn1606},
    volume = {8},
    year = {2005}
}

@article{Jacob08,
    abstract = {Rats discriminate objects by scanning their surface with the facial vibrissae, producing spatiotemporally complex sequences of tactile contacts. The way in which the somatosensory cortex responds to these complex multivibrissal stimuli has not been explored. It is unclear yet whether contextual information from across the entire whisker pad influences cortical responses. Here, we delivered tactile stimuli to the rat vibrissae using a new 24 whisker stimulator. We tested sequences of rostrocaudal whisker deflections that generate multivibrissal motion patterns in different directions across the mystacial pad, allowing to disambiguate local from global sensory integration. Unitary electrophysiological recordings from different layers of the barrel cortex showed that a majority of neurons has direction selectivity for the multivibrissal stimulus. The selectivity resulted from nonlinear integration of responses across the mystacial pad. Our results indicate that the system extracts collective properties of a tactile scene.},
    author = {Jacob, V. and {Le Cam}, J. and Ego-Stengel, V. and Shulz, D. E.},
    citeulike-article-id = {3832656},
    citeulike-linkout-0 = {http://www.cell.com/neuron/abstract/S0896-6273(08)00890-8},
    citeulike-linkout-1 = {http://dx.doi.org/10.1016/j.neuron.2008.10.017},
    citeulike-linkout-2 = {http://view.ncbi.nlm.nih.gov/pubmed/19109915},
    citeulike-linkout-3 = {http://www.hubmed.org/display.cgi?uids=19109915},
    date-added = {2011-06-14 12:28:00},
    day = {26},
    doi = {10.1016/j.neuron.2008.10.017},
    issn = {1097-4199},
    journal = {Neuron},
    keywords = {area-s1, neural\_representation, neural\_representations, sanz12jnp},
    month = dec,
    number = {6},
    pages = {1112--1125},
    pmid = {19109915},
    priority = {2},
    publisher = {Cell Press},
    title = {Emergent Properties of Tactile Scenes Selectively Activate Barrel Cortex Neurons},
    url = {http://dx.doi.org/10.1016/j.neuron.2008.10.017},
    volume = {60},
    year = {2008}
}

@article{Fischer07,
    address = {Hingham, MA, USA},
    author = {Fischer, Sylvain and \v{S}roubek, Filip and Perrinet, Laurent U. and Redondo, Rafael and Crist\'{o}bal, Gabriel},
    citeulike-article-id = {2902016},
    citeulike-linkout-0 = {http://portal.acm.org/citation.cfm?id=1286000.1286006},
    citeulike-linkout-1 = {http://dx.doi.org/10.1007/s11263-006-0026-8},
    date-added = {2011-05-18 09:12:21},
    doi = {10.1007/s11263-006-0026-8},
    issn = {0920-5691},
    journal = {Int. J. Comput. Vision},
    keywords = {assofield, log-gabor, motion\_clouds, perrinet11sfn, sanz12jnp, wavelets},
    month = nov,
    number = {2},
    pages = {231--246},
    priority = {0},
    publisher = {Kluwer Academic Publishers},
    title = {{Self-Invertible} {2D} {Log-Gabor} Wavelets},
    url = {http://dx.doi.org/10.1007/s11263-006-0026-8},
    volume = {75},
    year = {2007}
}

@article{Barthelemy08a,
    abstract = {Integrating information is essential to measure the physical {2D} motion of a surface from both ambiguous local {1D} motion of its elongated edges and non-ambiguous {2D} motion of its features such as corners or texture elements. The dynamics of this motion integration shows a complex time course as read from tracking eye movements: first, local {1D} motion signals are extracted and pooled to initiate ocular responses, then {2D} motion signals are integrated to adjust the tracking direction until it matches the surface motion direction. The nature of these {1D} and {2D} motion computations are still unclear. One hypothesis is that their different dynamics may be explained from different contrast sensitivities. To test this, we measured contrast-response functions of early, {1D}-driven and late, {2D}-driven components of ocular following responses to different motion stimuli: gratings, plaids and barberpoles. We found that contrast dynamics of {1D}-driven responses are nearly identical across the different stimuli. On the contrary, late {2D}-driven components with either plaids or barberpoles have similar latencies but different contrast dynamics. Temporal dynamics of both {1D}- and {2D}-driven responses demonstrates that the different contrast gains are set very early during the response time course. Running a Bayesian model of motion integration, we show that a large family of contrast-response functions can be predicted from the probability distributions of {1D} and {2D} motion signals for each stimulus and by the shape of the prior distribution. However, the pure delay (i.e. largely independent upon contrast) observed between {1D}- and {2D}-motion supports the fact that {1D} and {2D} probability distributions are computed independently. This two-pathway Bayesian model supports the idea that {1D} and {2D} mechanisms represent edges and features motion in parallel.},
    author = {Barth\'{e}lemy, Fr\'{e}d\'{e}ric V. and Perrinet, Laurent U. and Castet, Eric and Masson, Guillaume S.},
    citeulike-article-id = {8413330},
    citeulike-linkout-0 = {http://dx.doi.org/10.1016/j.visres.2007.10.020},
    citeulike-linkout-1 = {http://view.ncbi.nlm.nih.gov/pubmed/18221979},
    citeulike-linkout-2 = {http://www.hubmed.org/display.cgi?uids=18221979},
    date-added = {2011-05-18 09:08:59},
    doi = {10.1016/j.visres.2007.10.020},
    issn = {0042-6989},
    journal = {Vision Research},
    keywords = {bayesian, khoei12jpp, model, motion-clouds, motion\_clouds, motion\_integration, ocular\_following\_response, ofr, perrinet12pred, response\_latency, sanz12jnp},
    month = feb,
    number = {4},
    pages = {501--522},
    pmid = {18221979},
    priority = {0},
    title = {Dynamics of distributed {1D} and {2D} motion representations for short-latency ocular following.},
    url = {http://dx.doi.org/10.1016/j.visres.2007.10.020},
    volume = {48},
    year = {2008}
}

@article{Simoncelli98,
    abstract = {Electrophysiological studies indicate that neurons in the middle temporal ({MT}) area of the primate brain are selective for the velocity of visual stimuli. This paper describes a computational model of {MT} physiology, in which local image velocities are represented via the distribution of {MT} neuronal responses. The computation is performed in two stages, corresponding to neurons in cortical areas V1 and {MT}. Each stage computes a weighted linear sum of inputs, followed by rectification and divisive normalization. V1 receptive field weights are designed for orientation and direction selectivity. {MT} receptive field weights are designed for velocity (both speed and direction) selectivity. The paper includes computational simulations accounting for a wide range of physiological data, and describes experiments that could be used to further test and refine the model.},
    author = {Simoncelli, Eero P. and Heeger, David J.},
    citeulike-article-id = {3479383},
    citeulike-linkout-0 = {http://www.sciencedirect.com/science?\_ob=MiamiImageURL\&\#38;\_cid=271122\&\#38;\_user=125795\&\#38;\_pii=S0042698997001831\&\#38;\_check=y\&\#38;\_origin=\&\#38;\_coverDate=31-Mar-1998\&\#38;view=c\&\#38;wchp=dGLbVBA-zSkWA\&\#38;md5=66fbdfa4194480f1b9dba0189ad567a4/1-s2.0-S0042698997001831-main.pdf},
    citeulike-linkout-1 = {http://dx.doi.org/10.1016/S0042-6989(97)00183-1},
    citeulike-linkout-2 = {http://www.ingentaconnect.com/content/els/00426989/1998/00000038/00000005/art00183},
    date-added = {2011-05-12 13:50:45},
    doi = {10.1016/S0042-6989(97)00183-1},
    issn = {0042-6989},
    journal = {Vision Research},
    keywords = {khoei12jpp, motion\_clouds, perrinet12pred, sanz12jnp},
    month = mar,
    number = {5},
    pages = {743--761},
    priority = {2},
    publisher = {Elsevier},
    title = {A model of neuronal responses in visual area {MT}},
    url = {http://dx.doi.org/10.1016/S0042-6989(97)00183-1},
    volume = {38},
    year = {1998}
}

@article{Simoncelli01a,
    author = {Simoncelli, Eero P. and Heeger, D. J.},
    citeulike-article-id = {9287291},
    date-added = {2011-05-12 13:47:48},
    journal = {Nat. Neurosci.},
    keywords = {motion\_clouds, sanz12jnp},
    pages = {461--2},
    priority = {2},
    title = {Representing retinal image speed in visual cortex},
    volume = {4},
    year = {2001}
}

@article{Vinje00,
    abstract = {Theoretical studies suggest that primary visual cortex (area V1) uses a sparse code to efficiently represent natural scenes. This issue was investigated by recording from V1 neurons in awake behaving macaques during both free viewing of natural scenes and conditions simulating natural vision. Stimulation of the nonclassical receptive field increases the selectivity and sparseness of individual V1 neurons, increases the sparseness of the population response distribution, and strongly decorrelates the responses of neuron pairs. These effects are due to both excitatory and suppressive modulation of the classical receptive field by the nonclassical receptive field and do not depend critically on the spatiotemporal structure of the stimuli. During natural vision, the classical and nonclassical receptive fields function together to form a sparse representation of the visual world. This sparse code may be computationally efficient for both early vision and higher visual processing.},
    address = {Program in Neuroscience, Department of Molecular and Cellular Biology, and Department of Psychology, University of California at Berkeley, Berkeley, CA 94720-1650, USA.},
    author = {Vinje, W. E. and Gallant, Jack L.},
    citeulike-article-id = {762544},
    citeulike-linkout-0 = {http://dx.doi.org/10.1126/science.287.5456.1273},
    citeulike-linkout-1 = {http://dx.doi.org/10.1126/science.287.5456.1273},
    citeulike-linkout-2 = {http://www.sciencemag.org/content/287/5456/1273.abstract},
    citeulike-linkout-3 = {http://www.sciencemag.org/content/287/5456/1273.full.pdf},
    citeulike-linkout-4 = {http://www.sciencemag.org/cgi/content/abstract/287/5456/1273},
    citeulike-linkout-5 = {http://view.ncbi.nlm.nih.gov/pubmed/10678835},
    citeulike-linkout-6 = {http://www.hubmed.org/display.cgi?uids=10678835},
    date-added = {2011-05-12 13:41:27},
    day = {18},
    doi = {10.1126/science.287.5456.1273},
    issn = {0036-8075},
    journal = {Science},
    keywords = {motion\_clouds, natural\_scenes, sanz12jnp},
    month = feb,
    number = {5456},
    pages = {1273--1276},
    pmid = {10678835},
    priority = {2},
    title = {Sparse Coding and Decorrelation in Primary Visual Cortex During Natural Vision},
    url = {http://dx.doi.org/10.1126/science.287.5456.1273},
    volume = {287},
    year = {2000}
}

@article{Knierim92,
    abstract = {1. We recorded responses from neurons in area V1 of the alert macaque monkey to textured patterns modeled after stimuli used in psychophysical experiments of pop-out. Neuronal responses to a single oriented line segment placed within a cell's classical receptive field ({CRF}) were compared with responses in which the center element was surrounded by rings of elements placed entirely outside the {CRF}. The orientations of the surround elements either matched the center element, were orthogonal to it, or were random. 2. The addition of the textured surround tended to suppress the response to the center element by an average of 34\%. Overall, almost 80\% of the 122 cells analyzed in detail were significantly suppressed by at least one of the texture surrounds. 3. Cells tended to respond more strongly to a stimulus in which there was a contrast in orientation between the center and surround than to a stimulus lacking such contrast. The average difference was 9\% of the response to the optimally oriented center element alone. For the 32\% of the cells showing a statistically significant orientation contrast effect, the average difference was 28\%. 4. Both the general suppression and orientation contrast effects originated from surround regions at the ends of the center bar as well as regions along the sides of the center bar. 5. The amount of suppression induced by the texture surround decreased as the density of the texture elements decreased. 6. Both the general suppression and the orientation contrast effects appeared early in the population response to the stimuli. The general suppression effect took approximately 7 ms to develop, whereas the orientation contrast effect took 18-20 ms to develop. 7. These results are consistent with a possible functional role of V1 cells in the mediation of perceptual pop-out and in the segregation of texture borders. Possible anatomic substrates of the effects are discussed.},
    address = {Division of Biology, California Institute of Technology, Pasadena 91125.},
    author = {Knierim, J. J. and van Essen, D. C.},
    citeulike-article-id = {938850},
    citeulike-linkout-0 = {http://jn.physiology.org/content/67/4/961.abstract},
    citeulike-linkout-1 = {http://jn.physiology.org/content/67/4/961.abstract},
    citeulike-linkout-2 = {http://jn.physiology.org/content/67/4/961.full.pdf},
    citeulike-linkout-3 = {http://jn.physiology.org/cgi/content/abstract/67/4/961},
    citeulike-linkout-4 = {http://view.ncbi.nlm.nih.gov/pubmed/1588394},
    citeulike-linkout-5 = {http://www.hubmed.org/display.cgi?uids=1588394},
    date-added = {2011-05-12 13:25:17},
    day = {1},
    issn = {0022-3077},
    journal = {J. Neurophysiol.},
    keywords = {motion\_clouds, sanz12jnp},
    month = apr,
    number = {4},
    pages = {961--980},
    pmid = {1588394},
    priority = {2},
    title = {Neuronal responses to static texture patterns in area {V1} of the alert macaque monkey},
    url = {http://jn.physiology.org/content/67/4/961.abstract},
    volume = {67},
    year = {1992}
}

@article{Carandini05,
    author = {Carandini, M. and Demb, J. B. and Mante, V. and Tolhurst, D. J. and Dan, Yang and Olshausen, Bruno A. and Gallant, Jack L. and Rust, N. C.},
    citeulike-article-id = {9286868},
    date-added = {2011-05-12 12:52:53},
    journal = {J. Neurosci.},
    keywords = {motion\_clouds, sanz12jnp},
    month = nov,
    number = {46},
    pages = {10577--97},
    priority = {2},
    title = {Do we know what the early visual system does?},
    volume = {25},
    year = {2005}
}

@article{Field94,
    abstract = {A number of recent attempts have been made to describe early sensory coding in terms of a general information processing strategy. In this paper, two strategies are contrasted. Both strategies take advantage of the redundancy in the environment to produce more effective representations. The first is described as a ``compact'' coding scheme. A compact code performs a transform that allows the input to be represented with a reduced number of vectors (cells) with minimal {RMS} error. This approach has recently become popular in the neural network literature and is related to a process called Principal Components Analysis ({PCA}). A number of recent papers have suggested that the optimal ``compact'' code for representing natural scenes will have units with receptive field profiles much like those found in the retina and primary visual cortex. However, in this paper, it is proposed that compact coding schemes are insufficient to account for the receptive field properties of cells in the mammalian visual pathway. In contrast, it is proposed that the visual system is near to optimal in representing natural scenes only if optimality is defined in terms of ``sparse distributed'' coding. In a sparse distributed code, all cells in the code have an equal response probability across the class of images but have a low response probability for any single image. In such a code, the dimensionality is not reduced. Rather, the redundancy of the input is transformed into the redundancy of the firing pattern of cells. It is proposed that the signature for a sparse code is found in the fourth moment of the response distribution (i.e., the kurtosis). In measurements with 55 calibrated natural scenes, the kurtosis was found to peak when the bandwidths of the visual code matched those of cells in the mammalian visual cortex. Codes resembling ``wavelet transforms'' are proposed to be effective because the response histograms of such codes are sparse (i.e., show high kurtosis) when presented with natural scenes. It is proposed that the structure of the image that allows sparse coding is found in the phase spectrum of the image. It is suggested that natural scenes, to a first approximation, can be considered as a sum of self-similar local functions (the inverse of a wavelet). Possible reasons for why sensory systems would evolve toward sparse coding are presented.},
    address = {Cambridge, MA, USA},
    author = {Field, D. J.},
    citeulike-article-id = {3345508},
    citeulike-linkout-0 = {http://portal.acm.org/citation.cfm?id=188132.188136},
    citeulike-linkout-1 = {http://dx.doi.org/10.1162/neco.1994.6.4.559},
    citeulike-linkout-2 = {http://www.mitpressjournals.org/doi/abs/10.1162/neco.1994.6.4.559},
    date-added = {2011-05-12 11:36:30},
    day = {1},
    doi = {10.1162/neco.1994.6.4.559},
    issn = {0899-7667},
    journal = {Neural Comput.},
    keywords = {motion\_clouds, sanz12jnp},
    month = jul,
    number = {4},
    pages = {559--601},
    priority = {2},
    publisher = {MIT Press},
    title = {What Is the Goal of Sensory Coding?},
    url = {http://dx.doi.org/10.1162/neco.1994.6.4.559},
    volume = {6},
    year = {1994}
}

@article{Lewis84,
    abstract = {The problem of digital painting is considered from a signal processing viewpoint, and is reconsidered as a problem of directed texture synthesis. It is an important characteristic of natural texture that detail may be evident at many scales, and the detail at each scale may have distinct characteristics. A ``sparse convolution'' procedure for generating random textures with arbitrary spectral content is described. The capability of specifying the texture spectrum (and thus the amount of detail at each scale) is an improvement over stochastic texture synthesis processes which are scalebound or which have a prescribed 1/f spectrum. This spectral texture synthesis procedure provides the basis for a digital paint system which rivals the textural sophistication of traditional artistic media. Applications in terrain synthesis and texturing computer-rendered objects are also shown.},
    address = {New York, NY, USA},
    author = {Lewis, J. P.},
    citeulike-article-id = {8876296},
    citeulike-linkout-0 = {http://portal.acm.org/citation.cfm?id=800031.808605},
    citeulike-linkout-1 = {http://dx.doi.org/10.1145/964965.808605},
    date-added = {2011-05-12 11:22:01},
    doi = {10.1145/964965.808605},
    issn = {0097-8930},
    journal = {ACM SIGGRAPH Computer Graphics},
    keywords = {motion\_clouds, sanz12jnp},
    month = jan,
    pages = {245--252},
    priority = {4},
    publisher = {ACM},
    title = {Texture synthesis for digital painting},
    url = {http://dx.doi.org/10.1145/964965.808605},
    volume = {18},
    year = {1984}
}
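
A note on the entry above (Lewis84; see also Galerne10 below): the idea of ``random textures with arbitrary spectral content'' can be illustrated most simply by imposing a prescribed amplitude spectrum on uniformly random Fourier phases and inverting the transform. This is the random-phase-texture construction rather than Lewis's sparse-convolution procedure itself, and the isotropic 1/f^alpha envelope below is an arbitrary choice made only for this sketch (NumPy assumed).

import numpy as np

def random_phase_texture(shape=(256, 256), alpha=1.5, seed=0):
    # Prescribe an isotropic 1/f**alpha amplitude spectrum, draw uniformly
    # random phases, and invert the FFT to obtain a random texture whose
    # amount of detail at each scale is controlled by alpha.
    rng = np.random.default_rng(seed)
    fy = np.fft.fftfreq(shape[0])[:, None]
    fx = np.fft.fftfreq(shape[1])[None, :]
    radius = np.sqrt(fx ** 2 + fy ** 2)
    radius[0, 0] = np.inf                    # zero out the DC component
    amplitude = 1.0 / radius ** alpha
    phase = rng.uniform(0.0, 2.0 * np.pi, size=shape)
    return np.real(np.fft.ifft2(amplitude * np.exp(1j * phase)))

texture = random_phase_texture(alpha=1.0)    # larger alpha gives a smoother texture

Varying alpha trades detail across scales, which is the kind of spectral control the abstract emphasizes.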

@article{Essock09,
    abstract = {Broadband oriented-noise masks were used to assess the orientation properties of spatial-context suppression in 'general' viewing conditions (i.e., a fixated, large field of 'naturalistic' noise). Suppression was orientation-tuned with a Gaussian shape and bandwidth of 40$\,^{\circ}$ that was consistent across test orientation (0$\,^{\circ}$, 45$\,^{\circ}$, 90$\,^{\circ}$, and 135$\,^{\circ}$). Strength of suppression was highly anisotropic following a ``horizontal effect'' pattern (strongest suppression at horizontal and least suppression at oblique test orientations). Next, the time course of anisotropic masking was investigated by varying stimulus onset asynchrony ({SOA}). A standard ``oblique effect'' anisotropy is observed at long {SOAs} but becomes a ``horizontal effect'' when a noise mask is present within approximately 50 ms of the test onset. The orientation-tuned masking appears to result from an anisotropic gain-control mechanism that pools the weighted responses to the broadband mask, resulting in a changeover from oblique effect to horizontal effect. In addition, the relative magnitude of suppression at the orientations tested corresponds to the relative magnitudes of the content of typical natural scenes at the same orientations. We suggest that this anisotropic suppression may serve to equalize the visual system's response across orientation when viewing typical natural scenes, 'discounting' the anisotropy of typical natural scene content.},
    author = {Essock, E. A. and Haun, A. M. and Kim, Y. J.},
    citeulike-article-id = {9277851},
    citeulike-linkout-0 = {http://dx.doi.org/10.1167/9.1.35},
    citeulike-linkout-1 = {http://www.journalofvision.org/content/9/1/35.abstract},
    citeulike-linkout-2 = {http://www.journalofvision.org/content/9/1/35.full.pdf},
    citeulike-linkout-3 = {http://view.ncbi.nlm.nih.gov/pubmed/19271905},
    citeulike-linkout-4 = {http://www.hubmed.org/display.cgi?uids=19271905},
    date-added = {2011-05-11 18:33:38},
    day = {23},
    doi = {10.1167/9.1.35},
    journal = {J. Vis.},
    keywords = {motion\_clouds, sanz12jnp},
    month = jan,
    number = {1},
    pmid = {19271905},
    priority = {5},
    title = {An anisotropy of orientation-tuned suppression that matches the anisotropy of typical natural scenes},
    url = {http://dx.doi.org/10.1167/9.1.35},
    volume = {9},
    year = {2009}
}

@article{Watson95,
    abstract = {Contrast energy thresholds were measured for discriminating the direction of a drifting sinusoidal grating multiplied by an independently drifting space-time Gaussian (a generalized Gabor). We argue that the stimulus with the lowest contrast energy threshold identifies the receptive field of the most efficient linear motion filter. This optimal motion stimulus is found to be at 3 c/deg and 5 Hz, with a width and height of 0.44 deg and a duration of 0.133 sec, corresponding to spatial and temporal bandwidths of 1.1 and 2.5 octaves, respectively. The spectral receptive field is aligned more nearly to the Cartesian axes than to the velocity contour.},
    author = {Watson, A. B. and Turano, K.},
    citeulike-article-id = {9277640},
    citeulike-linkout-0 = {http://dx.doi.org/10.1016/0042-6989(94)00182-L},
    date-added = {2011-05-11 18:00:09},
    doi = {10.1016/0042-6989(94)00182-L},
    issn = {00426989},
    journal = {Vision Res.},
    keywords = {motion\_clouds, sanz12jnp},
    month = feb,
    number = {3},
    pages = {325--336},
    priority = {2},
    title = {The optimal motion stimulus},
    url = {http://dx.doi.org/10.1016/0042-6989(94)00182-L},
    volume = {35},
    year = {1995}
}

@article{Field87,
    author = {Field, D. J.},
    citeulike-article-id = {9277525},
    date-added = {2011-05-11 17:27:47},
    journal = {J. Opt. Soc. Am. A},
    keywords = {assofield, images, motion\_clouds, natural, natural\_scenes, perrinet11sfn, sanz12jnp},
    pages = {2379--2394},
    priority = {5},
    title = {Relations between the statistics of natural images and the response properties of cortical cells},
    volume = {4},
    year = {1987}
}

@article{Campbell68,
    abstract = {1. The contrast thresholds of a variety of grating patterns have been measured over a wide range of spatial frequencies. 2. Contrast thresholds for the detection of gratings whose luminance profiles are sine, square, rectangular or saw-tooth waves can be simply related using Fourier theory. 3. Over a wide range of spatial frequencies the contrast threshold of a grating is determined only by the amplitude of the fundamental Fourier component of its wave form. 4. Gratings of complex wave form cannot be distinguished from sine-wave gratings until their contrast has been raised to a level at which the higher harmonic components reach their independent threshold. 5. These findings can be explained by the existence within the nervous system of linearly operating independent mechanisms selectively sensitive to limited ranges of spatial frequencies.},
    author = {Campbell, F. W. and Robson, J. G.},
    citeulike-article-id = {9277524},
    citeulike-linkout-0 = {http://jp.physoc.org/content/197/3/551.abstract},
    comment = { * finds that for complex gratings, the fundamental (that is, the sinusoid) determines the contrast threshold
 * deduces that there are independent channels with different frequency tunings },
    date-added = {2011-05-11 17:27:47},
    eprint = {http://jp.physoc.org/content/197/3/551.full.pdf+html},
    journal = {J. Physiol. (London)},
    keywords = {energy, model, motion\_clouds, motion\_energy\_model, sanz12jnp},
    number = {3},
    pages = {551--566},
    priority = {5},
    title = {Application of {F}ourier analysis to the visibility of gratings},
    url = {http://jp.physoc.org/content/197/3/551.abstract},
    volume = {197},
    year = {1968}
}

@article{Braun00,
    abstract = {A new stimulus display reveals that humans summate the motion energies of all components consistent with a single velocity, rather than optimizing sensitivity by ignoring noise.},
    author = {Braun, J.},
    citeulike-article-id = {9277523},
    comment = {Perspective paper on Schrater's paper},
    date-added = {2011-05-11 17:27:47},
    journal = {Nat. Neurosci.},
    keywords = {motion, motion\_clouds, sanz12jnp},
    pages = {9--11},
    priority = {5},
    title = {Targeting Visual Motion},
    volume = {3},
    year = {2000}
}

@article{Sajda04,
    address = {Oxford, UK, UK},
    author = {Sajda, Paul and Baek, Kyungim},
    citeulike-article-id = {9277522},
    citeulike-linkout-0 = {http://dx.doi.org/10.1016/j.neunet.2004.03.013},
    citeulike-linkout-1 = {http://portal.acm.org/citation.cfm?id=1046876.1046891},
    date-added = {2011-05-11 17:27:47},
    doi = {10.1016/j.neunet.2004.03.013},
    journal = {Neural Networks},
    keywords = {aperture-problem, form-and-motion-integration, generative, generative-model, generative\_model, hypercolumn, integration, model, motion, motion\_clouds, occlusion, perrinet12pred, problem, sanz12jnp},
    month = jun,
    pages = {809--821},
    priority = {5},
    publisher = {Elsevier Science Ltd.},
    title = {Integration of form and motion within a generative model of visual cortex},
    url = {http://portal.acm.org/citation.cfm?id=1046876.1046891},
    volume = {17},
    year = {2004}
}

@article{Tsuchiya07,
    abstract = {We compare luminance-contrast-masking thresholds for fully and poorly attended stimuli, controlling attention with a demanding concurrent task. We use dynamic displays composed of discrete spatiotemporal wavelets, comparing three conditions ("single," "parallel," and "random"). In contrast to static displays, we do not find that attention modulates the "dipper" regime for masks of low luminance contrast. Nor does attention alter direction-selective masking by multiple wavelets moving in random directions, a condition designed to isolate effects on component motion. However, direction-selective masking by multiple wavelets moving in parallel is significantly reduced by attention. As the latter condition is expected to excite both component and pattern motion mechanisms, this implies that attention may alter the visual representation of pattern motion. In addition, attention exhibits its well-known effect of reducing lateral masking between nearby spatiotemporal wavelets.},
    author = {Tsuchiya, N. and Braun, J.},
    citeulike-article-id = {9277521},
    date-added = {2011-05-11 17:27:47},
    journal = {J. Vis.},
    keywords = {component, component-motion, motion, motion\_clouds, motion\_component\_cell, motion\_pattern\_cell, pattern, pattern-motion, sanz12jnp},
    number = {3},
    pages = {1},
    priority = {5},
    title = {Contrast thresholds for component motion with full and poor attention.},
    volume = {7},
    year = {2007}
}

@techreport{Lagae09,
    author = {Lagae, A. and Lefebvre, S. and Drettakis, G. and Dutr{\'{e}}, P.},
    citeulike-article-id = {9277519},
    date-added = {2011-05-11 17:27:46},
    institution = {Department of Computer Science, K.U.Leuven},
    keywords = {motion\_clouds, sanz12jnp, texture},
    number = {Report CW 545},
    priority = {5},
    title = {Procedural Noise using Sparse Gabor Convolution},
    year = {2009}
}

@article{Galerne10,
    author = {Galerne, B. and Gousseau, Y. and Morel, J. M.},
    citeulike-article-id = {9277518},
    citeulike-linkout-0 = {http://www.biomedsearch.com/nih/Random-Phase-Textures-Theory-Synthesis/20550995.html},
    date-added = {2011-05-11 17:27:46},
    journal = {IEEE T. Image. Process.},
    keywords = {motion\_clouds, sanz12jnp, texture},
    priority = {5},
    title = {Random {P}hase {T}extures: Theory and Synthesis.},
    url = {http://www.biomedsearch.com/nih/Random-Phase-Textures-Theory-Synthesis/20550995.html},
    year = {2010}
}

@book{Marr83,
    abstract = {A computational investigation into the human representation and processing of visual information.},
    author = {Marr, D.},
    citeulike-article-id = {9277517},
    citeulike-linkout-0 = {http://www.worldcat.org/isbn/0716715678},
    date-added = {2011-05-11 17:27:46},
    day = {15},
    howpublished = {Paperback},
    keywords = {artificial-intelligence, book, motion, motion-perception, motion\_clouds, motion\_perception, perception, sanz12jnp, vision},
    month = jun,
    priority = {4},
    publisher = {Henry Holt \& Company},
    title = {Vision: A Computational Investigation into the Human Representation and Processing of Visual Information},
    url = {http://www.worldcat.org/isbn/0716715678},
    year = {1983}
}

@article{Chen05,
    abstract = {Visual motion is sensed by low-level (energy-based) and high-level (feature-based) mechanisms. Our interest is in the motion detectors underlying the initial ocular following responses ({OFR}) that are elicited at ultrashort latencies by sudden motions of large images. {OFR} were elicited in humans by applying horizontal motion to vertical square-wave gratings lacking the fundamental. In the frequency domain, a pure square wave is composed of the odd harmonics--first, third, fifth, seventh, etc.--such that the third, fifth, seventh, etc., have amplitudes that are one-third, one-fifth, one-seventh, etc., that of the first, and the missing fundamental stimulus lacks the first harmonic. Motion consisted of successive quarter-wavelength steps, so the features and 4n+1 harmonics (where n = integer) shifted forward, whereas the 4n-1 harmonics--including the strongest Fourier component (the third harmonic)--shifted backward (spatial aliasing). Thus, the net Fourier energy and the {non-Fourier} features moved in opposite directions. Initial {OFR}, recorded with the search coil technique, had minimum latencies of 60 to 70 ms and were always in the direction of the third harmonic, for example, leftward steps resulted in rightward {OFR}. Thus, the earliest {OFR} were strongly dependent on the motion of the major Fourier component, consistent with mediation by oriented spatiotemporal visual filters as in the well-known energy model of motion detection. Introducing interstimulus intervals of 10 to 100 ms (during which the screen was uniform gray) reversed the initial direction of tracking, consistent with extensive neurophysiological and psychophysical data suggesting that the visual input to the motion detectors has a biphasic temporal impulse response.},
    author = {Chen, K. J. and Sheliga, B. M. and Fitzgibbon, E. J. and Miles, F. A.},
    citeulike-article-id = {9277516},
    date-added = {2011-05-11 17:27:46},
    journal = {Ann N Y Acad Sci},
    keywords = {motion\_clouds, sanz12jnp},
    month = apr,
    pages = {260--271},
    priority = {5},
    title = {Initial ocular following in humans depends critically on the {F}ourier components of the motion stimulus},
    volume = {1039},
    year = {2005}
}

@article{Del-Viva98,
    abstract = {We have developed a two-stage model of motion perception that identifies moving spatial features and computes their velocity, achieving both high spatial localisation and reliable estimates of velocity. Features are detected in each frame by locating the peaks of the spatial local energy functions, as for stationary images (Morrone {MC} and Burr {DC}. Proc R Soc Lond {1988;B235}:221-245.). The energy functions are calculated for different scales and orientations, and integrated within a temporal Gaussian window. The velocity of features is determined by the direction of maximal elongation of the energy in space-time, evaluated by calculating the three characteristic curvatures of the energy at each feature point. To circumvent the aperture problem, the energy maps are blurred in space by various amounts, and velocity is computed separately for each spatial blur. The Weber fraction of the local curvatures (curvature contrast) describes the spatio-temporal energy elongation at each feature point, giving a reliability index for each velocity estimate. For each point, the velocity of the spatial blur that yielded the highest curvature contrast was selected, with no further constraints, such as rigidity of motion. Dynamic recruitment of operators of different size allows maximum flexibility of the analysis, allowing it to simulate human visual performance in the detection of noise images, transparent motion, some motion illusions, and second-order motion.},
    address = {Istituto di Neurofisiologia del CNR, Pisa, Italy},
    author = {{Del Viva}, M. M. and Morrone, M. C.},
    citeulike-article-id = {9277515},
    date-added = {2011-05-11 17:27:46},
    journal = {Vision Res.},
    keywords = {motion, motion\_clouds, sanz12jnp},
    number = {22},
    pages = {3633--53},
    priority = {5},
    title = {Motion analysis by feature tracking},
    volume = {38},
    year = {1998}
}

@article{Sheliga08,
    author = {Sheliga, B. M. and FitzGibbon, E. J. and Miles, Fred A.},
    citeulike-article-id = {9277514},
    date-added = {2011-05-11 17:27:46},
    journal = {Vision Res.},
    keywords = {apparent, apparent\_motion, control, divisive, divisive\_normalization, energy, fourier, fundamental, gain, gain\_control, gratings, integration, missing, models, motion, motion\_clouds, motion\_energy\_model, motion\_integration, normalization, ocular\_following\_response, ofr, sanz12jnp, spatial, spatial\_summation, summation, vision, visual, winner-take-all},
    pages = {1758--76},
    priority = {5},
    title = {Spatial summation properties of the human ocular following response ({OFR}): evidence for nonlinearities due to local and global inhibitory interactions},
    volume = {48},
    year = {2008}
}

@article{Sheliga05,
    author = {Sheliga, B. M. and Chen, K. J. and FitzGibbon, E. J. and Miles, Fred A.},
    citeulike-article-id = {9277513},
    date-added = {2011-05-11 17:27:46},
    journal = {Vision Res.},
    keywords = {motion\_clouds, ocular\_following\_response, ofr, sanz12jnp},
    pages = {3307--21},
    priority = {5},
    title = {Ocular following in humans: A response to first-order motion energy.},
    volume = {45},
    year = {2005}
}

@article{Miura06,
    author = {Miura, K. and Matsuura, K. and Taki, M. and Tabata, H. and Inaba, N. and Kawano, K. and Miles, Fred A.},
    citeulike-article-id = {9277512},
    date-added = {2011-05-11 17:27:45},
    journal = {Vision Res.},
    keywords = {contrast, control, detectors, filtering, following, fourier, fundamental, gain, gain\_control, linear, linear-motion, missing, missing-fundamental, monkeys, motion, motion-energy, motion\_1st-order, motion\_clouds, motion\_energy\_model, ocular, ocular-following, ocular\_following\_response, ofrs, sanz12jnp, spatio-temporal, spatiotemporal, spatiotemporal-filtering},
    pages = {869--78},
    priority = {5},
    title = {The visual motion detectors underlying ocular following responses in monkeys},
    volume = {46},
    year = {2006}
}

@article{Perrinet07neurocomp,
    abstract = {The machinery behind the visual perception of motion and the subsequent sensori-motor transformation, such as in Ocular Following Response (OFR), is confronted to uncertainties which are efficiently resolved in the primate's visual system. We may understand this response as an ideal observer in a probabilistic framework by using Bayesian theory (Weiss et al., 2002) which we previously proved to be successfully adapted to model the OFR for different levels of noise with full field gratings (Perrinet et al., 2005). More recent experiments of OFR have used disk gratings and bipartite stimuli which are optimized to study the dynamics of center-surround integration. We quantified two main characteristics of the spatial integration of motion: (i) a finite optimal stimulus size for driving OFR, surrounded by an antagonistic modulation and (ii) a direction selective suppressive effect of the surround on the contrast gain control of the central stimuli (Barth{\'{e}}lemy et al., 2006). Herein, we extended the ideal observer model to simulate the spatial integration of the different local motion cues within a probabilistic representation. We present analytical results which show that the hypothesis of independence of local measures can describe the integration of the spatial motion signal. Within this framework, we successfully accounted for the contrast gain control mechanisms observed in the behavioral data for center-surround stimuli. However, another inhibitory mechanism had to be added to account for suppressive effects of the surround.},
    author = {Perrinet, Laurent U. and Masson, Guillaume S.},
    citeulike-article-id = {9277511},
    date-added = {2011-05-11 17:27:45},
    journal = {Journal of {P}hysiology ({P}aris)},
    keywords = {bayesian, bayesian-model, center-surround, center-surround-interactions, eye, integration, interactions, model, motion, motion-clouds, motion-integration, movements, ocular-following-response, ofr, perception, perrinet12pred, sanz12jnp, tracking, tracking-eye-movements, vision, visual, visual-perception},
    number = {1--3},
    pages = {46--55},
    priority = {5},
    title = {Modeling spatial integration in the ocular following response using a probabilistic framework},
    volume = {101},
    year = {2007}
}

@inproceedings{Simoncini10vss,
    author = {Simoncini, C. and Perrinet, L. U. and Montagnini, Anna and Mamassian, P. and Masson, Guillaume S.},
    booktitle = {Vision Science Society},
    citeulike-article-id = {9277510},
    date-added = {2011-05-11 17:27:45},
    keywords = {motion\_clouds, sanz12jnp},
    number = {43.503},
    priority = {5},
    title = {Different pooling of motion information for perceptual speed discrimination and behavioral speed estimation},
    year = {2010}
}

@article{Webb07,
    author = {Webb, B. S. and Ledgeway, T. and McGraw, P. V.},
    citeulike-article-id = {9277509},
    date-added = {2011-05-11 17:27:45},
    journal = {Proc. Natl. Acad. Sci. U. S. A.},
    keywords = {motion\_clouds, sanz12jnp},
    number = {9},
    pages = {3532},
    priority = {5},
    publisher = {National Academy Sciences},
    title = {Cortical pooling algorithms for judging global motion direction},
    volume = {104},
    year = {2007}
}

@article{Lagae09a,
    author = {Lagae, A. and Lefebvre, S. and Drettakis, G. and Dutr{\'{e}}, P.},
    citeulike-article-id = {9277507},
    date-added = {2011-05-11 17:27:45},
    journal = {ACM Trans. Graph.},
    keywords = {motion\_clouds, sanz12jnp, texture},
    month = aug,
    number = {3},
    priority = {5},
    title = {Procedural Noise using Sparse Gabor Convolution},
    volume = {28},
    year = {2009}
}

@article{Peyre09,
    address = {New York, NY, USA},
    author = {Peyr{\'{e}}, G.},
    citeulike-article-id = {9277506},
    date-added = {2011-05-11 17:27:45},
    journal = {Comput. Vis. Image Und.},
    keywords = {manifolds, motion\_clouds, sanz12jnp},
    number = {2},
    pages = {249--260},
    priority = {5},
    publisher = {Elsevier Science Inc.},
    title = {Manifold models for signals and images},
    volume = {113},
    year = {2009}
}

@article{Peyre10,
    address = {Los Alamitos, CA, USA},
    author = {Peyr{\'{e}}, G.},
    citeulike-article-id = {9277505},
    date-added = {2011-05-11 17:27:45},
    journal = {IEEE T. Pattern. Anal.},
    keywords = {manifolds, motion\_clouds, sanz12jnp, texture},
    pages = {733--746},
    priority = {5},
    publisher = {IEEE Computer Society},
    title = {Texture Synthesis with Grouplets},
    volume = {32},
    year = {2010}
}

@article{Stocker05,
    author = {Stocker, A. and Simoncelli, Eero P.},
    citeulike-article-id = {9277502},
    date-added = {2011-05-11 17:27:45},
    journal = {Adv Neural Inf Process},
    keywords = {motion, motion-perception, motion\_clouds, motion\_perception, perception, sanz12jnp},
    pages = {1361--1368},
    priority = {5},
    title = {Constraining a Bayesian Model of Human Visual Speed Perception},
    volume = {17},
    year = {2005}
}

@article{Watson85,
    author = {Watson, A. B.},
    citeulike-article-id = {9277501},
    date-added = {2011-05-11 17:27:45},
    journal = {J. Opt. Soc. Am. A},
    keywords = {model, motion, motion\_clouds, sanz12jnp, vision, visual},
    priority = {5},
    title = {Model of human visual-motion sensing},
    volume = {2},
    year = {1985}
}

@article{Heeger87,
    author = {Heeger, D. J.},
    citeulike-article-id = {9277500},
    date-added = {2011-05-11 17:27:45},
    journal = {J. Opt. Soc. Am. A},
    keywords = {flow, image, image-flow, motion\_clouds, sanz12jnp},
    priority = {5},
    title = {Model for the extraction of image flow},
    year = {1987}
}

@article{Heeger96,
    abstract = {The visual responses of neurons in the cerebral cortex were first adequately characterized in the 1960s by D. H. Hubel and T. N. Wiesel [(1962) J. Physiol. (London) 160, 106-154; (1968) J. Physiol. (London) 195, 215-243] using qualitative analyses based on simple geometric visual targets. Over the past 30 years, it has become common to consider the properties of these neurons by attempting to make formal descriptions of the transformations they execute on the visual image. Most such models have their roots in linear-systems approaches pioneered in the retina by C. {Enroth-Cugell} and J. R. Robson [(1966) J. Physiol. (London) 187, 517-552], but it is clear that purely linear models of cortical neurons are inadequate. We present two related models: one designed to account for the responses of simple cells in primary visual cortex (V1) and one designed to account for the responses of pattern direction selective cells in {MT} (or V5), an extrastriate visual area thought to be involved in the analysis of visual motion. These models share a common structure that operates in the same way on different kinds of input, and instantiate the widely held view that computational strategies are similar throughout the cerebral cortex. Implementations of these models for Macintosh microcomputers are available and can be used to explore the models' properties.},
    address = {Department of Psychology, Stanford University, CA 94305, USA.},
    author = {Heeger, D. J. and Simoncelli, Eero P. and Movshon, J. Anthony},
    citeulike-article-id = {9277499},
    citeulike-linkout-0 = {http://dx.doi.org/10.1073/pnas.93.2.623},
    date-added = {2011-05-11 17:27:44},
    day = {23},
    doi = {10.1073/pnas.93.2.623},
    journal = {Proc. Natl. Acad. Sci. U. S. A.},
    keywords = {computational, computational\_model, cortex, model, motion\_clouds, normalization, sanz12jnp, vision},
    month = jan,
    number = {2},
    pages = {623--627},
    priority = {2},
    title = {Computational models of cortical visual processing},
    url = {http://dx.doi.org/10.1073/pnas.93.2.623},
    volume = {93},
    year = {1996}
}

@book{Hyvrinen09,
    author = {Hyv{\"{a}}rinen, A. and Hurri, J. and Hoyer, P. O.},
    citeulike-article-id = {9277498},
    citeulike-linkout-0 = {http://books.google.com/books?hl=fr\&lr=\&id=pq\_Fr1eYr7cC\&oi=fnd\&pg=PR5\&dq=Natural+Image+Statistics:+A+Probabilistic+Approach+to+Early+Computational+Vision\&ots=LbKdzNpmXr\&sig=CjjIbe8k9CcZzUW2fzwINtGAJAA\#v=onepage\&q\&f=false},
    date-added = {2011-05-11 17:27:44},
    edition = {1st},
    keywords = {images, motion\_clouds, natural, natural\_scenes, sanz12jnp, statistics},
    priority = {5},
    publisher = {Springer Publishing Company, Incorporated},
    title = {Natural Image Statistics: A Probabilistic Approach to Early Computational Vision},
    url = {http://books.google.com/books?hl=fr\&lr=\&id=pq\_Fr1eYr7cC\&oi=fnd\&pg=PR5\&dq=Natural+Image+Statistics:+A+Probabilistic+Approach+to+Early+Computational+Vision\&ots=LbKdzNpmXr\&sig=CjjIbe8k9CcZzUW2fzwINtGAJAA\#v=onepage\&q\&f=false},
    year = {2009}
}

@article{Fischer07b,
    author = {Fischer, S. and Redondo, R. and Perrinet, L. U. and Crist{\'{o}}bal, G.},
    citeulike-article-id = {9277497},
    date-added = {2011-05-11 17:27:44},
    journal = {EURASIP J Appl Sig P},
    keywords = {assofield, log-gabor, log\_gabor, motion\_clouds, perrinet11sfn, sanz12jnp},
    number = {1},
    pages = {122},
    priority = {0},
    publisher = {Hindawi Publishing Corp.},
    title = {Sparse approximation of images inspired from the functional architecture of the primary visual areas},
    volume = {2007},
    year = {2007}
}

@article{Oppenheim81,
    author = {Oppenheim, A. and Lim, J.},
    citeulike-article-id = {9277496},
    citeulike-linkout-0 = {http://dx.doi.org/10.1109/PROC.1981.12022},
    comment = {says that you can still recognize the form of an image if you discard the amplitude spectrum while keeping the phase},
    date-added = {2011-05-11 17:27:44},
    doi = {10.1109/PROC.1981.12022},
    issn = {0018-9219},
    journal = {Proceedings of the IEEE},
    keywords = {assofield, motion\_clouds, perrinet11sfn, phase, sanz12jnp},
    number = {5},
    pages = {529--541},
    priority = {2},
    title = {The importance of phase in signals},
    url = {http://dx.doi.org/10.1109/PROC.1981.12022},
    volume = {69},
    year = {1981}
}

@inproceedings{Gluckman03,
    author = {Gluckman, J.},
    booktitle = {3rd International Workshop on Statistical and Computational Theories of Vision},
    citeulike-article-id = {9277495},
    date-added = {2011-05-11 17:27:44},
    keywords = {motion\_clouds, phase, sanz12jnp},
    priority = {5},
    title = {Kurtosis and the Phase Structure of Images},
    year = {2003}
}

@article{Thomson00,
    abstract = {Fourier-phase information is important in determining the appearance of natural scenes, but the structure of natural-image phase spectra is highly complex and difficult to relate directly to human perceptual processes. This problem is addressed by extending previous investigations of human visual sensitivity to the randomisation and quantisation of Fourier phase in natural images. The salience of the image changes induced by these physical processes is shown to depend critically on the nature of the original phase spectrum of each image, and the processes of randomisation and quantisation are shown to be perceptually equivalent provided that they shift image phase components by the same average amount. These results are explained by assuming that the visual system is sensitive to those phase-domain image changes which also alter certain global higher-order image statistics. This assumption may be used to place constraints on the likely nature of cortical processing: mechanisms which correlate the outputs of a bank of relative-phase-sensitive units are found to be consistent with the patterns of sensitivity reported here.},
    author = {Thomson, M. G. A. and Foster, D. H. and Summers, R. J.},
    citeulike-article-id = {9277494},
    citeulike-linkout-0 = {http://www.perceptionweb.com/abstract.cgi?id=p2867},
    date-added = {2011-05-11 17:27:44},
    journal = {Perception},
    keywords = {human, images, motion\_clouds, natural, natural\_scenes, phase, sanz12jnp, statistics},
    number = {9},
    pages = {1057--1069},
    priority = {5},
    publisher = {Pion Ltd},
    title = {Human sensitivity to phase perturbations in natural images: a statistical framework},
    url = {http://www.perceptionweb.com/abstract.cgi?id=p2867},
    volume = {29},
    year = {2000}
}

@article{Rieke95,
    abstract = {Natural sounds, especially communication sounds, have highly structured amplitude and phase spectra. We have quantified how structure in the amplitude spectrum of natural sounds affects coding in primary auditory afferents. Auditory afferents encode stimuli with naturalistic amplitude spectra dramatically better than broad-band stimuli (approximating white noise); the rate at which the spike train carries information about the stimulus is 2-6 times higher for naturalistic sounds. Furthermore, the information rates can reach 90\% of the fundamental limit to information transmission set by the statistics of the spike response. These results indicate that the coding strategy of the auditory nerve is matched to the structure of natural sounds; this `tuning' allows afferent spike trains to provide higher processing centres with a more complete description of the sensory world.},
    author = {Rieke, F. and Bodnar, D. A. and Bialek, W.},
    citeulike-article-id = {9277462},
    citeulike-linkout-0 = {http://dx.doi.org/10.2307/50104},
    citeulike-linkout-1 = {http://www.jstor.org/stable/50104},
    date-added = {2011-05-11 17:22:29},
    doi = {10.2307/50104},
    issn = {09628452},
    journal = {P. Roy. Soc. Lond. B Bio.},
    keywords = {motion\_clouds, sanz12jnp},
    number = {1365},
    priority = {4},
    publisher = {The Royal Society},
    title = {Naturalistic Stimuli Increase the Rate and Efficiency of Information Transmission by Primary Auditory Afferents},
    url = {http://dx.doi.org/10.2307/50104},
    volume = {262},
    year = {1995}
}

@article{Masson12,
    abstract = {Short-latency ocular following responses are reflexive tracking eye movements that are observed in human and non-human primates in response to a sudden and brief translation of the image. The initial, open-loop part of the eye acceleration reflects many of the properties attributed to low-level motion processing. We review a very large set of behavioral data demonstrating several key properties of motion detection and integration stages and their dynamics. We propose that these properties can be modeled as a behavioral receptive field exhibiting linear and nonlinear mechanisms responsible for context-dependent spatial integration and gain control. Functional models similar to those used for describing neuronal properties of receptive fields can then be applied successfully.},
    author = {Masson, Guillaume S. and Perrinet, Laurent U.},
    citeulike-article-id = {9075176},
    citeulike-linkout-0 = {http://dx.doi.org/10.1016/j.neubiorev.2011.03.009},
    citeulike-linkout-1 = {http://view.ncbi.nlm.nih.gov/pubmed/21421006},
    citeulike-linkout-2 = {http://www.hubmed.org/display.cgi?uids=21421006},
    date-added = {2011-03-29 09:44:54},
    day = {21},
    doi = {10.1016/j.neubiorev.2011.03.009},
    issn = {0149-7634},
    journal = {Neuroscience \& Biobehavioral Reviews},
    keywords = {behavioral\_receptive\_field, eye\_movements, motion\_clouds, motion\_estimation, perrinet12pred, primate, sanz12jnp, tracking},
    month = jan,
    number = {1},
    pages = {1--25},
    pmid = {21421006},
    priority = {0},
    title = {The behavioral receptive field underlying motion integration for primate tracking eye movements},
    url = {http://dx.doi.org/10.1016/j.neubiorev.2011.03.009},
    volume = {36},
    year = {2012}
}

@article{Pei11,
    abstract = {How are local motion signals integrated to form a global motion percept? We investigate the neural mechanisms of tactile motion integration by presenting tactile gratings and plaids to the fingertips of monkeys, using the tactile analogue of a visual monitor and recording the responses evoked in somatosensory cortical neurons. The perceived directions of the gratings and plaids are measured in parallel psychophysical experiments. We identify a population of somatosensory neurons that exhibit integration properties comparable to those induced by analogous visual stimuli in area {MT} and find that these neural responses account for the perceived direction of the stimuli across all stimulus conditions tested. The preferred direction of the neurons and the perceived direction of the stimuli can be predicted from the weighted average of the directions of the individual stimulus features, highlighting that the somatosensory system implements a vector average mechanism to compute tactile motion direction that bears striking similarities to its visual counterpart.},
    author = {Pei, Yu-Cheng and Hsiao, Steven S. and Craig, James C. and Bensmaia, Sliman J.},
    citeulike-article-id = {8802643},
    citeulike-linkout-0 = {http://dx.doi.org/10.1016/j.neuron.2010.12.033},
    citeulike-linkout-1 = {http://www.ncbi.nlm.nih.gov/pmc/articles/PMC3052381/},
    citeulike-linkout-2 = {http://view.ncbi.nlm.nih.gov/pubmed/21315263},
    citeulike-linkout-3 = {http://www.hubmed.org/display.cgi?uids=21315263},
    comment = {* read-out mechanism:"the somatosensory system implements a vector average mechanism "},
    date-added = {2011-03-18 13:06:02},
    day = {10},
    doi = {10.1016/j.neuron.2010.12.033},
    issn = {1097-4199},
    journal = {Neuron},
    keywords = {motion, motion\_integration, perrinet12pred, sanz12jnp, tactile},
    month = feb,
    number = {3},
    pages = {536--47},
    pmcid = {PMC3052381},
    pmid = {21315263},
    priority = {0},
    title = {Neural mechanisms of tactile motion integration in somatosensory cortex},
    url = {http://dx.doi.org/10.1016/j.neuron.2010.12.033},
    volume = {69},
    year = {2011}
}

@article{Perrinet10shl,
    abstract = {Neurons in the input layer of primary visual cortex in primates develop edge-like receptive fields. One approach to understanding the emergence of this response is to state that neural activity has to efficiently represent sensory data with respect to the statistics of natural scenes. Furthermore, it is believed that such an efficient coding is achieved using a competition across neurons so as to generate a sparse representation, that is, where a relatively small number of neurons are simultaneously active. Indeed, different models of sparse coding, coupled with Hebbian learning and homeostasis, have been proposed that successfully match the observed emergent response. However, the specific role of homeostasis in learning such sparse representations is still largely unknown. By quantitatively assessing the efficiency of the neural representation during learning, we derive a cooperative homeostasis mechanism that optimally tunes the competition between neurons within the sparse coding algorithm. We apply this homeostasis while learning small patches taken from natural images and compare its efficiency with state-of-the-art algorithms. Results show that while different sparse coding algorithms give similar coding results, the homeostasis provides an optimal balance for the representation of natural images within the population of neurons. Competition in sparse coding is optimized when it is fair. By contributing to optimizing statistical competition across neurons, homeostasis is crucial in providing a more efficient solution to the emergence of independent components.},
    author = {Perrinet, Laurent U.},
    citeulike-article-id = {7158387},
    citeulike-linkout-0 = {http://www.incm.cnrs-mrs.fr/LaurentPerrinet/Publications/Perrinet10shl},
    citeulike-linkout-1 = {http://dx.doi.org/10.1162/neco.2010.05-08-795},
    citeulike-linkout-2 = {http://www.mitpressjournals.org/doi/abs/10.1162/neco.2010.05-08-795},
    citeulike-linkout-3 = {http://view.ncbi.nlm.nih.gov/pubmed/20235818},
    citeulike-linkout-4 = {http://www.hubmed.org/display.cgi?uids=20235818},
    date-added = {2011-03-16 13:52:51},
    day = {17},
    doi = {10.1162/neco.2010.05-08-795},
    issn = {1530-888X},
    journal = {Neural Computation},
    keywords = {assofield, homeostasis, khoei12jpp, overcomplete\_dictionaries, perrinet10shl, perrinet11sfn, perrinet12pred, sanz12jnp, sparse\_coding, sparse\_hebbian\_learning, sparse\_spike\_coding},
    month = jul,
    number = {7},
    pages = {1812--1836},
    pmid = {20235818},
    priority = {0},
    publisher = {MIT Press},
    title = {Role of Homeostasis in Learning Sparse Representations},
    url = {http://www.incm.cnrs-mrs.fr/LaurentPerrinet/Publications/Perrinet10shl},
    volume = {22},
    year = {2010}
}

@article{Derrington04,
    abstract = {Psychophysical experiments on feature tracking suggest that most of our sensitivity to chromatic motion and to second-order motion depends on feature tracking. There is no reason to suppose that the visual system contains motion sensors dedicated to the analysis of second-order motion. Current psychophysical and physiological data indicate that local motion sensors are selective for orientation and spatial frequency but they do not eliminate any of the three main models---the Reichardt detector, the motion-energy filter, and gradient-based sensors. Both psychophysical and physiological data suggest that both broadly oriented and narrowly oriented motion sensors are important in the early analysis of motion in two dimensions.},
    author = {Derrington, A. M. and Allen, H. A. and Delicato, L. S.},
    citeulike-article-id = {4066681},
    citeulike-linkout-0 = {http://www.annualreviews.org/doi/abs/10.1146/annurev.psych.55.090902.141903},
    citeulike-linkout-1 = {http://dx.doi.org/10.1146/annurev.psych.55.090902.141903},
    citeulike-linkout-2 = {http://view.ncbi.nlm.nih.gov/pubmed/14744214},
    citeulike-linkout-3 = {http://www.hubmed.org/display.cgi?uids=14744214},
    comment = { * p. 183 "vection (is) the visually induced sensation that it is the observer, rather than objects in the external world, that is moving." > implies an extra-retinal signal},
    date-added = {2011-02-21 13:53:36},
    doi = {10.1146/annurev.psych.55.090902.141903},
    issn = {0066-4308},
    journal = {Annu. Rev. Psychol.},
    keywords = {motion\_2nd-order, motion\_clouds, motion\_energy\_model, review, sanz12jnp},
    number = {1},
    pages = {181--205},
    pmid = {14744214},
    priority = {4},
    title = {Visual Mechanisms of Motion Analysis and Motion Perception},
    url = {http://dx.doi.org/10.1146/annurev.psych.55.090902.141903},
    volume = {55},
    year = {2004}
}

@article{Adelson85,
    abstract = {A motion sequence may be represented as a single pattern in x-y-t space; a velocity of motion corresponds to a three-dimensional orientation in this space. Motion information can be extracted by a system that responds to the oriented spatiotemporal energy. We discuss a class of models for human motion mechanisms in which the first stage consists of linear filters that are oriented in space-time and tuned in spatial frequency. The outputs of quadrature pairs of such filters are squared and summed to give a measure of motion energy. These responses are then fed into an opponent stage. Energy models can be built from elements that are consistent with known physiology and psychophysics, and they permit a qualitative understanding of a variety of motion phenomena.},
    author = {Adelson, Edward H. and Bergen, James R.},
    citeulike-article-id = {2202069},
    citeulike-linkout-0 = {http://dx.doi.org/10.1364/JOSAA.2.000284},
    citeulike-linkout-1 = {http://www.opticsinfobase.org/abstract.cfm?id=1945},
    date-added = {2011-02-21 13:50:14},
    day = {1},
    doi = {10.1364/JOSAA.2.000284},
    journal = {J. Opt. Soc. Am. A},
    keywords = {energy, khoei12jpp, model, motion, motion-clouds, motion-energy-model, perrinet12pred, sanz12jnp},
    month = feb,
    number = {2},
    pages = {284--99},
    priority = {0},
    publisher = {OSA},
    title = {Spatiotemporal energy models for the perception of motion},
    url = {http://dx.doi.org/10.1364/JOSAA.2.000284},
    volume = {2},
    year = {1985}
}
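
A note on the entry above (Adelson85): the abstract describes the motion-energy computation almost algorithmically (filter with space-time oriented quadrature pairs, square and sum, then take an opponent difference). The sketch below is a rough one-spatial-dimension illustration of that recipe, assuming NumPy/SciPy and using space-time Gabor quadrature pairs as stand-ins for the paper's oriented filters; filter size and tuning frequencies are arbitrary choices made for this example.

import numpy as np
from scipy.signal import fftconvolve

def motion_energy(stimulus, fs=0.1, ft=0.1, size=15):
    # Opponent motion energy for a (t, x) stimulus, using space-time Gabor
    # quadrature pairs tuned to rightward and leftward drift.
    x = np.arange(size) - size // 2          # spatial axis (columns)
    t = x[:, None]                           # temporal axis (rows)
    win = np.exp(-(t ** 2 + x ** 2) / (2 * (size / 4) ** 2))
    energies = []
    for direction in (+1, -1):               # +1: rightward-tuned, -1: leftward-tuned
        phase = 2 * np.pi * (fs * x - direction * ft * t)
        even = win * np.cos(phase)           # quadrature pair of linear filters
        odd = win * np.sin(phase)
        energy = (fftconvolve(stimulus, even, mode='same') ** 2
                  + fftconvolve(stimulus, odd, mode='same') ** 2)
        energies.append(energy)
    return energies[0] - energies[1]         # opponent stage: rightward minus leftward

# A sinusoidal grating drifting rightward yields positive opponent energy.
tt, xx = np.mgrid[0:128, 0:128]
stimulus = np.sin(2 * np.pi * (0.1 * xx - 0.1 * tt))
print(motion_energy(stimulus).mean() > 0)    # expected: True

For a grating drifting rightward the rightward-tuned pair dominates, so the opponent energy is positive; reversing the drift direction flips the sign.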

@article{Wang11,
    abstract = {Motion-in-depth causes changes in the size of retinal images in addition to producing optic flow patterns. A previous psychophysical study showed that human subjects can perceive expansion motion in texture stimuli that exhibit increases in the scale of image elements but no consistent optic flow pattern. The neural mechanisms by which the scale-change information is processed remain unknown. Here, we measured the responses of cat V1 and the lateral geniculate nucleus ({LGN}) neurons to a sequence of random images whose spatial frequency spectrum changed over time (i.e., average spatial scale expanded or contracted). We found that V1 neurons exhibit direction sensitivity to scale changes, with more cells preferring expansion than contraction motion. This direction sensitivity can be partly accounted for by the spectrotemporal receptive field of V1 neurons. Comparison of the direction sensitivity between V1 and {LGN} neurons showed that the sensitivity in V1 may originate from {LGN} neurons. Repetitive stimulation with expansion or contraction motion can decrease the sensitivity to the adapted direction in V1, and the effect can be transferred interocularly, suggesting that intracortical connections may be critically involved in the adaptation. Together, our results suggest that direction sensitivity to scale change in V1 may contribute to motion-in-depth processing.},
    author = {Wang, C. and Yao, H.},
    citeulike-article-id = {7843125},
    citeulike-linkout-0 = {http://dx.doi.org/10.1093/cercor/bhq176},
    citeulike-linkout-1 = {http://cercor.oxfordjournals.org/content/early/2010/09/13/cercor.bhq176.abstract},
    citeulike-linkout-2 = {http://cercor.oxfordjournals.org/content/early/2010/09/13/cercor.bhq176.full.pdf},
    citeulike-linkout-3 = {http://view.ncbi.nlm.nih.gov/pubmed/20841322},
    citeulike-linkout-4 = {http://www.hubmed.org/display.cgi?uids=20841322},
    date-added = {2011-01-24 13:25:54},
    day = {1},
    doi = {10.1093/cercor/bhq176},
    journal = {Cereb Cortex},
    keywords = {area-v1, motion, motion\_estimation, sanz12jnp},
    month = apr,
    number = {4},
    pages = {964--973},
    pmid = {20841322},
    priority = {2},
    title = {Sensitivity of {V1} Neurons to Direction of Spectral Motion},
    url = {http://dx.doi.org/10.1093/cercor/bhq176},
    volume = {21},
    year = {2011}
}

@article{Schrater00,
    abstract = {Visual motion is processed by neurons in primary visual cortex that are sensitive to spatial orientation and speed. Many models of local velocity computation are based on a second stage that pools the outputs of first-stage neurons selective for different orientations, but the nature of this pooling remains controversial. In a human psychophysical detection experiment, we found near-perfect summation of image energy when it was distributed uniformly across all orientations, but poor summation when it was concentrated in specific orientation bands. The data are consistent with a model that integrates uniformly over all orientations, even when this strategy is sub-optimal.},
    address = {Department of Psychology, University of Minnesota, N218 Elliott Hall, 75 E. River Dr., Minneapolis, Minnesota 55455, USA. schrater@eye.psych.umn.edu},
    author = {Schrater, P. R. and Knill, D. C. and Simoncelli, Eero P.},
    citeulike-article-id = {2799051},
    citeulike-linkout-0 = {http://dx.doi.org/10.1038/71134},
    citeulike-linkout-1 = {http://dx.doi.org/10.1038/nn0100\_64},
    citeulike-linkout-2 = {http://view.ncbi.nlm.nih.gov/pubmed/10607396},
    citeulike-linkout-3 = {http://www.hubmed.org/display.cgi?uids=10607396},
    date-added = {2011-01-24 13:02:25},
    day = {01},
    doi = {10.1038/71134},
    issn = {1097-6256},
    journal = {Nat. Neurosci.},
    keywords = {global\_motion, motion, motion\_clouds, motion\_energy\_model, motion\_estimation, motion\_patterns, sanz12jnp},
    month = jan,
    number = {1},
    pages = {64--8},
    pmid = {10607396},
    priority = {2},
    publisher = {Nature Publishing Group},
    title = {Mechanisms of Visual Motion Detection},
    url = {http://dx.doi.org/10.1038/71134},
    volume = {3},
    year = {2000}
}

@article{Scarfe10,
    abstract = {The perceived position of stationary objects can appear shifted in space due to the presence of motion in another part of the visual field (motion drag). We investigated this phenomenon with global motion Gabor arrays. These arrays consist of randomly oriented Gabors (Gaussian windowed sinusoidal luminance modulations) whose speed is set such that the normal component of the individual Gabor's motion is consistent with a single {2D} global velocity. Global motion arrays were shown to alter the perceived position of nearby stationary objects. The size of this shift was the same as that induced by arrays of Gabors uniformly oriented in the direction of global motion and drifting at the global motion speed. Both types of array were found to be robust to large changes in array density and exhibited the same time course of effect. The motion drag induced by the global motion arrays was consistent with the estimated {2D} global velocity, rather than by the component of the local velocities in the global motion direction. This suggests that the motion signal that induces motion drag originates at or after a stage at which local motion signals have been integrated to produce a global motion estimate.},
    author = {Scarfe, P. and Johnston, A.},
    citeulike-article-id = {8511654},
    citeulike-linkout-0 = {http://view.ncbi.nlm.nih.gov/pubmed/20616138},
    citeulike-linkout-1 = {http://view.ncbi.nlm.nih.gov/pubmed/20616138},
    citeulike-linkout-2 = {http://www.hubmed.org/display.cgi?uids=20616138},
    date-added = {2011-01-06 14:42:55},
    issn = {1534-7362},
    journal = {J. Vis.},
    keywords = {gabor, global\_motion, motion, sanz12jnp},
    number = {5},
    pmid = {20616138},
    priority = {0},
    title = {Motion drag induced by global motion Gabor arrays.},
    url = {http://view.ncbi.nlm.nih.gov/pubmed/20616138},
    volume = {10},
    year = {2010}
}

@article{Bradley08,
    abstract = {Computational neuroscience combines theory and experiment to shed light on the principles and mechanisms of neural computation. This approach has been highly fruitful in the ongoing effort to understand velocity computation by the primate visual system. This Review describes the success of spatiotemporal-energy models in representing local-velocity detection. It shows why local-velocity measurements tend to differ from the velocity of the object as a whole. Certain cells in the middle temporal area are thought to solve this problem by combining local-velocity estimates to compute the overall pattern velocity. The Review discusses different models for how this might occur and experiments that test these models. Although no model is yet firmly established, evidence suggests that computing pattern velocity from local-velocity estimates involves simple operations in the spatiotemporal frequency domain.},
    author = {Bradley, David C. and Goyal, Manu S.},
    citeulike-article-id = {3139443},
    citeulike-linkout-0 = {http://dx.doi.org/10.1038/nrn2472},
    citeulike-linkout-1 = {http://dx.doi.org/10.1038/nrn2472},
    date-added = {2010-11-22 19:59:24},
    day = {13},
    doi = {10.1038/nrn2472},
    issn = {1471-003X},
    journal = {Nat. Rev. Neurosci.},
    keywords = {motion, sanz12jnp},
    month = aug,
    number = {9},
    pages = {686--695},
    priority = {5},
    publisher = {Nature Publishing Group},
    title = {Velocity computation in the primate visual system},
    url = {http://dx.doi.org/10.1038/nrn2472},
    volume = {9},
    year = {2008}
}

@article{Albright84,
    author = {Albright, Thomas D.},
    citeulike-article-id = {3887737},
    date-added = {2009-01-15 09:39:54},
    journal = {Journal of {N}europhysiology},
    keywords = {1d-motion-signals, aperture-problem, area-mt, bars, direction-selectivity, integration, motion, motion-integration, orientation, orientation-selectivity, pattern, pattern-responses, perrinet12pred, sanz12jnp, selectivity},
    pages = {1106--30},
    priority = {2},
    title = {Direction and orientation selectivity of neurons in visual area {MT} of the macaque},
    volume = {52},
    year = {1984}
}